2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
74 * DEPRECATED sysctl system call code
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
99 #include <sys/disklabel.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
106 #include <security/audit/audit.h>
107 #include <kern/kalloc.h>
109 #include <mach/machine.h>
110 #include <mach/mach_host.h>
111 #include <mach/mach_types.h>
112 #include <mach/vm_param.h>
113 #include <kern/mach_param.h>
114 #include <kern/task.h>
115 #include <kern/thread.h>
116 #include <kern/lock.h>
117 #include <kern/processor.h>
118 #include <kern/debug.h>
119 #include <vm/vm_kern.h>
120 #include <vm/vm_map.h>
121 #include <mach/host_info.h>
123 #include <sys/mount_internal.h>
124 #include <sys/kdebug.h>
125 #include <sys/sysproto.h>
127 #include <IOKit/IOPlatformExpert.h>
128 #include <pexpert/pexpert.h>
130 #include <machine/machine_routines.h>
131 #include <machine/exec.h>
133 #include <vm/vm_protos.h>
134 #include <sys/imgsrc.h>
136 #if defined(__i386__) || defined(__x86_64__)
137 #include <i386/cpuid.h>
141 #include <sys/kern_memorystatus.h>
145 * deliberately setting max requests to really high number
146 * so that runaway settings do not cause MALLOC overflows
148 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
150 extern sysctlfn net_sysctl
;
151 extern sysctlfn cpu_sysctl
;
152 extern int aio_max_requests
;
153 extern int aio_max_requests_per_process
;
154 extern int aio_worker_threads
;
155 extern int lowpri_IO_window_msecs
;
156 extern int lowpri_IO_delay_msecs
;
157 extern int nx_enabled
;
158 extern int speculative_reads_disabled
;
159 extern int ignore_is_ssd
;
160 extern unsigned int speculative_prefetch_max
;
161 extern unsigned int speculative_prefetch_max_iosize
;
162 extern unsigned int preheat_pages_max
;
163 extern unsigned int preheat_pages_min
;
164 extern long numvnodes
;
166 extern unsigned int vm_max_delayed_work_limit
;
167 extern unsigned int vm_max_batch
;
169 extern unsigned int vm_page_free_min
;
170 extern unsigned int vm_page_free_target
;
171 extern unsigned int vm_page_free_reserved
;
172 extern unsigned int vm_page_speculative_percentage
;
173 extern unsigned int vm_page_speculative_q_age_ms
;
176 * Conditionally allow dtrace to see these functions for debugging purposes.
184 #define STATIC static
187 extern boolean_t mach_timer_coalescing_enabled
;
190 fill_user32_eproc(proc_t
, struct user32_eproc
*__restrict
);
192 fill_user32_externproc(proc_t
, struct user32_extern_proc
*__restrict
);
194 fill_user64_eproc(proc_t
, struct user64_eproc
*__restrict
);
196 fill_user64_proc(proc_t
, struct user64_kinfo_proc
*__restrict
);
198 fill_user64_externproc(proc_t
, struct user64_extern_proc
*__restrict
);
200 fill_user32_proc(proc_t
, struct user32_kinfo_proc
*__restrict
);
203 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t * sizep
);
209 pcsamples_ops(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
211 __private_extern__ kern_return_t
212 reset_vmobjectcache(unsigned int val1
, unsigned int val2
);
214 sysctl_procargs(int *name
, u_int namelen
, user_addr_t where
,
215 size_t *sizep
, proc_t cur_proc
);
217 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
,
218 proc_t cur_proc
, int argc_yes
);
220 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
,
221 size_t newlen
, void *sp
, int len
);
223 STATIC
int sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
);
224 STATIC
int sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
);
225 STATIC
int sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
);
226 STATIC
int sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
);
227 STATIC
int sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
);
229 STATIC
int sysdoproc_filt_KERN_PROC_LCID(proc_t p
, void * arg
);
231 int sysdoproc_callback(proc_t p
, void *arg
);
234 /* forward declarations for non-static STATIC */
235 STATIC
void fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
);
236 STATIC
void fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
);
237 STATIC
int sysctl_handle_kern_threadname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
238 STATIC
int sysctl_sched_stats(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
239 STATIC
int sysctl_sched_stats_enable(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
240 STATIC
int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
;
241 STATIC
int sysctl_dotranslate SYSCTL_HANDLER_ARGS
;
242 STATIC
int sysctl_doaffinity SYSCTL_HANDLER_ARGS
;
244 STATIC
int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
;
245 #endif /* COUNT_SYSCALLS */
247 STATIC
int sysctl_doprocargs SYSCTL_HANDLER_ARGS
;
248 #endif /* !CONFIG_EMBEDDED */
249 STATIC
int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
;
250 STATIC
int sysctl_prochandle SYSCTL_HANDLER_ARGS
;
252 STATIC
int sysctl_dodebug SYSCTL_HANDLER_ARGS
;
254 STATIC
int sysctl_aiomax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
255 STATIC
int sysctl_aioprocmax(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
256 STATIC
int sysctl_aiothreads(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
257 STATIC
int sysctl_maxproc(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
258 STATIC
int sysctl_osversion(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
259 STATIC
int sysctl_sysctl_bootargs(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
260 STATIC
int sysctl_maxvnodes(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
261 STATIC
int sysctl_securelvl(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
262 STATIC
int sysctl_domainname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
263 STATIC
int sysctl_hostname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
264 STATIC
int sysctl_procname(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
265 STATIC
int sysctl_boottime(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
266 STATIC
int sysctl_symfile(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
268 STATIC
int sysctl_netboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
270 #ifdef CONFIG_IMGSRC_ACCESS
271 STATIC
int sysctl_imgsrcdev(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
273 STATIC
int sysctl_usrstack(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
274 STATIC
int sysctl_usrstack64(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
275 STATIC
int sysctl_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
276 STATIC
int sysctl_suid_coredump(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
277 STATIC
int sysctl_delayterm(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
278 STATIC
int sysctl_rage_vnode(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
279 STATIC
int sysctl_kern_check_openevt(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
280 STATIC
int sysctl_nx(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
281 STATIC
int sysctl_loadavg(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
282 STATIC
int sysctl_vm_toggle_address_reuse(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
283 STATIC
int sysctl_swapusage(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
284 #if defined(__i386__) || defined(__x86_64__)
285 STATIC
int sysctl_sysctl_exec_affinity(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
287 STATIC
int fetch_process_cputype( proc_t cur_proc
, int *name
, u_int namelen
, cpu_type_t
*cputype
);
288 STATIC
int sysctl_sysctl_native(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
289 STATIC
int sysctl_sysctl_cputype(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
290 STATIC
int sysctl_safeboot(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
291 STATIC
int sysctl_singleuser(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
292 STATIC
int sysctl_slide(struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
);
295 extern void IORegistrySetOSBuildVersion(char * build_version
);
298 fill_loadavg64(struct loadavg
*la
, struct user64_loadavg
*la64
)
300 la64
->ldavg
[0] = la
->ldavg
[0];
301 la64
->ldavg
[1] = la
->ldavg
[1];
302 la64
->ldavg
[2] = la
->ldavg
[2];
303 la64
->fscale
= (user64_long_t
)la
->fscale
;
307 fill_loadavg32(struct loadavg
*la
, struct user32_loadavg
*la32
)
309 la32
->ldavg
[0] = la
->ldavg
[0];
310 la32
->ldavg
[1] = la
->ldavg
[1];
311 la32
->ldavg
[2] = la
->ldavg
[2];
312 la32
->fscale
= (user32_long_t
)la
->fscale
;
318 * Description: Wire down the callers address map on behalf of sysctl's
319 * that perform their own copy operations while holding
320 * locks e.g. in the paging path, which could lead to a
321 * deadlock, or while holding a spinlock.
323 * Parameters: addr User buffer address
324 * len User buffer length
327 * vslock:ENOMEM Insufficient physical pages to wire
328 * vslock:EACCES Bad protection mode
329 * vslock:EINVAL Invalid parameters
331 * Notes: This code is invoked for the first OID element where the
332 * CTLFLAG_LOCKED is not specified for a given OID node
333 * element during OID traversal, and is held for all
334 * subsequent node traversals, and only released after the
335 * leaf node handler invocation is complete.
337 * Legacy: For legacy sysctl's provided by third party code which
338 * expect funnel protection for calls into their code, this
339 * routine will also take the funnel, which will also only
340 * be released after the leaf node handler is complete.
342 * This is to support legacy 32 bit BSD KEXTs and legacy 32
343 * bit single threaded filesystem KEXTs and similar code
344 * which relies on funnel protection, e.g. for things like
345 * FSID based sysctl's.
347 * NEW CODE SHOULD NOT RELY ON THIS BEHAVIOUR! IT WILL BE
348 * REMOVED IN A FUTURE RELEASE OF Mac OS X!
350 * Bugs: This routine does nothing with the new_addr and new_len
351 * at present, but it should, since read from the user space
352 * process address space which could potentially trigger
353 * paging may also be occurring deep down. This is due to
354 * a current limitation of the vslock() routine, which will
355 * always request a wired mapping be read/write, due to not
356 * taking an access mode parameter. Note that this could
357 * also cause problems for output on architectures where
358 * write access does not require read access if the current
359 * mapping lacks read access.
361 * XXX: To be moved to kern_newsysctl.c to avoid __private_extern__
363 int sysctl_mem_lock(user_addr_t old_addr
, user_size_t old_len
, user_addr_t new_addr
, user_size_t new_len
);
365 sysctl_mem_lock(__unused user_addr_t old_addr
, __unused user_size_t old_len
, __unused user_addr_t new_addr
, __unused user_size_t new_len
)
374 /* sysctl() syscall */
376 __sysctl(proc_t p
, struct __sysctl_args
*uap
, __unused
int32_t *retval
)
378 boolean_t funnel_state
= FALSE
; /* not held if unknown */
380 size_t savelen
= 0, oldlen
= 0, newlen
;
381 int name
[CTL_MAXNAME
];
383 boolean_t vslock_taken
= FALSE
;
384 boolean_t funnel_taken
= FALSE
;
386 kauth_cred_t my_cred
;
390 * all top-level sysctl names are non-terminal
392 if (uap
->namelen
> CTL_MAXNAME
|| uap
->namelen
< 2)
394 error
= copyin(uap
->name
, &name
[0], uap
->namelen
* sizeof(int));
398 AUDIT_ARG(ctlname
, name
, uap
->namelen
);
400 if (proc_is64bit(p
)) {
401 /* uap->newlen is a size_t value which grows to 64 bits
402 * when coming from a 64-bit process. since it's doubtful we'll
403 * have a sysctl newp buffer greater than 4GB we shrink it to size_t
405 newlen
= CAST_DOWN(size_t, uap
->newlen
);
408 newlen
= uap
->newlen
;
412 * XXX TODO: push down rights check for CTL_HW OIDs; most duplicate
413 * XXX it anyway, which is a performance sink, and requires use
414 * XXX of SUID root programs (see <rdar://3915692>).
416 * Note: Opt out of non-leaf node enforcement by removing this
417 * check for the top level OID value, and then adding
418 * CTLFLAG_ANYBODY to the leaf nodes in question. Enforce as
419 * suser for writed in leaf nodes by omitting this flag.
420 * Enforce with a higher granularity by making the leaf node
421 * of type SYSCTL_PROC() in order to provide a procedural
422 * enforcement call site.
424 * NOTE: This function is called prior to any subfunctions being
425 * called with a fallback to userland_sysctl(); as such, this
426 * permissions check here will veto the fallback operation.
428 /* CTL_UNSPEC is used to get oid to AUTO_OID */
429 if (uap
->new != USER_ADDR_NULL
430 && ((name
[0] == CTL_HW
)
431 || (name
[0] == CTL_VM
))
432 && (error
= suser(kauth_cred_get(), &p
->p_acflag
)))
435 // XXX need to relocate into each terminal instead of leaving this here...
436 // XXX macf preemptory check.
438 my_cred
= kauth_cred_proc_ref(p
);
439 error
= mac_system_check_sysctl(
445 0, /* XXX 1 for CTL_KERN checks */
449 kauth_cred_unref(&my_cred
);
454 if (uap
->oldlenp
!= USER_ADDR_NULL
) {
455 uint64_t oldlen64
= fuulong(uap
->oldlenp
);
457 oldlen
= CAST_DOWN(size_t, oldlen64
);
459 * If more than 4G, clamp to 4G - useracc() below will catch
460 * with an EFAULT, if it's actually necessary.
462 if (oldlen64
> 0x00000000ffffffffULL
)
463 oldlen
= 0xffffffffUL
;
466 if ((name
[0] == CTL_VFS
|| name
[0] == CTL_VM
)) {
468 * Always take the funnel for CTL_VFS and CTL_VM
470 * XXX We should also take it for any OID without the
471 * XXX CTLFLAG_LOCKED set on it; fix this later!
473 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
477 * XXX Take the vslock() only when we are copying out; this
478 * XXX erroneously assumes that the copy in will not cause
479 * XXX a fault if called from the paging path due to the
480 * XXX having been recently touched in order to establish
481 * XXX the input data. This is a bad assumption.
483 * Note: This is overkill, but third parties might
484 * already call sysctl internally in KEXTs that
485 * implement mass storage drivers. If you are
486 * writing a new KEXT, don't do that.
488 if(uap
->old
!= USER_ADDR_NULL
) {
489 if (!useracc(uap
->old
, (user_size_t
)oldlen
, B_WRITE
)) {
490 thread_funnel_set(kernel_flock
, funnel_state
);
495 if ((error
= vslock(uap
->old
, (user_size_t
)oldlen
))) {
496 thread_funnel_set(kernel_flock
, funnel_state
);
506 * XXX convert vfs_sysctl subelements to newsysctl; this is hard
507 * XXX because of VFS_NUMMNTOPS being top level.
510 if (name
[0] == CTL_VFS
) {
511 error
= vfs_sysctl(name
+ 1, uap
->namelen
- 1, uap
->old
,
512 &oldlen
, uap
->new, newlen
, p
);
515 if (vslock_taken
== TRUE
) {
516 error1
= vsunlock(uap
->old
, (user_size_t
)savelen
, B_WRITE
);
521 if ( (name
[0] != CTL_VFS
) && (error
== ENOTSUP
) ) {
523 error
= userland_sysctl(p
, name
, uap
->namelen
, uap
->old
, &tmp
,
524 uap
->new, newlen
, &oldlen
);
528 * If we took the funnel, which we only do for CTL_VFS and CTL_VM on
529 * 32 bit architectures, then drop it.
531 * XXX the grabbing and dropping need to move into the leaf nodes,
532 * XXX for sysctl's that are not marked CTLFLAG_LOCKED, but this is
533 * XXX true for the vslock, as well. We have a start at a routine
534 * to wrapper this (above), but it's not turned on. The current code
535 * removed the funnel and the vslock() from all but these two top
536 * level OIDs. Note that VFS only needs to take the funnel if the FS
537 * against which it's operating is not thread safe (but since an FS
538 * can be in the paging path, it still needs to take the vslock()).
541 thread_funnel_set(kernel_flock
, funnel_state
);
543 if ((error
) && (error
!= ENOMEM
))
546 if (uap
->oldlenp
!= USER_ADDR_NULL
)
547 error
= suulong(uap
->oldlenp
, oldlen
);
553 * Attributes stored in the kernel.
555 __private_extern__
char corefilename
[MAXPATHLEN
+1];
556 __private_extern__
int do_coredump
;
557 __private_extern__
int sugid_coredump
;
560 __private_extern__
int do_count_syscalls
;
564 int securelevel
= -1;
570 sysctl_doaffinity SYSCTL_HANDLER_ARGS
572 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
573 int *name
= arg1
; /* oid element argument vector */
574 int namelen
= arg2
; /* number of oid element arguments */
575 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
576 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
577 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
578 // size_t newlen = req->newlen; /* user buffer copy in size */
580 int error
= ENOTSUP
; /* Default to failure */
582 proc_t cur_proc
= current_proc();
587 if (name
[0] == 0 && 1 == namelen
) {
588 error
= sysctl_rdint(oldp
, oldlenp
, newp
,
589 (cur_proc
->p_flag
& P_AFFINITY
) ? 1 : 0);
590 } else if (name
[0] == 1 && 2 == namelen
) {
592 OSBitAndAtomic(~((uint32_t)P_AFFINITY
), &cur_proc
->p_flag
);
594 OSBitOrAtomic(P_AFFINITY
, &cur_proc
->p_flag
);
599 /* adjust index so we return the right required/consumed amount */
601 req
->oldidx
+= req
->oldlen
;
605 SYSCTL_PROC(_kern
, KERN_AFFINITY
, affinity
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
606 0, /* Pointer argument (arg1) */
607 0, /* Integer argument (arg2) */
608 sysctl_doaffinity
, /* Handler function */
609 NULL
, /* Data pointer */
613 sysctl_dotranslate SYSCTL_HANDLER_ARGS
615 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
616 int *name
= arg1
; /* oid element argument vector */
617 int namelen
= arg2
; /* number of oid element arguments */
618 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
619 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
620 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
621 // size_t newlen = req->newlen; /* user buffer copy in size */
624 proc_t cur_proc
= current_proc();
626 int istranslated
= 0;
627 kauth_cred_t my_cred
;
633 p
= proc_find(name
[0]);
637 my_cred
= kauth_cred_proc_ref(p
);
638 uid
= kauth_cred_getuid(my_cred
);
639 kauth_cred_unref(&my_cred
);
640 if ((uid
!= kauth_cred_getuid(kauth_cred_get()))
641 && suser(kauth_cred_get(), &cur_proc
->p_acflag
)) {
646 istranslated
= (p
->p_flag
& P_TRANSLATED
);
648 error
= sysctl_rdint(oldp
, oldlenp
, newp
,
649 (istranslated
!= 0) ? 1 : 0);
651 /* adjust index so we return the right required/consumed amount */
653 req
->oldidx
+= req
->oldlen
;
658 * XXX make CTLFLAG_RW so sysctl_rdint() will EPERM on attempts to write;
659 * XXX this may not be necessary.
661 SYSCTL_PROC(_kern
, KERN_TRANSLATE
, translate
, CTLTYPE_NODE
|CTLFLAG_RW
| CTLFLAG_LOCKED
,
662 0, /* Pointer argument (arg1) */
663 0, /* Integer argument (arg2) */
664 sysctl_dotranslate
, /* Handler function */
665 NULL
, /* Data pointer */
669 sysctl_handle_kern_threadname( __unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
670 __unused
int arg2
, struct sysctl_req
*req
)
673 struct uthread
*ut
= get_bsdthread_info(current_thread());
674 user_addr_t oldp
=0, newp
=0;
675 size_t *oldlenp
=NULL
;
679 oldlenp
= &(req
->oldlen
);
681 newlen
= req
->newlen
;
683 /* We want the current length, and maybe the string itself */
685 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
686 size_t currlen
= MAXTHREADNAMESIZE
- 1;
689 /* use length of current thread name */
690 currlen
= strlen(ut
->pth_name
);
692 if(*oldlenp
< currlen
)
694 /* NOTE - we do not copy the NULL terminator */
696 error
= copyout(ut
->pth_name
,oldp
,currlen
);
701 /* return length of thread name minus NULL terminator (just like strlen) */
702 req
->oldidx
= currlen
;
705 /* We want to set the name to something */
708 if(newlen
> (MAXTHREADNAMESIZE
- 1))
712 ut
->pth_name
= (char*)kalloc( MAXTHREADNAMESIZE
);
716 bzero(ut
->pth_name
, MAXTHREADNAMESIZE
);
717 error
= copyin(newp
, ut
->pth_name
, newlen
);
725 SYSCTL_PROC(_kern
, KERN_THREADNAME
, threadname
, CTLFLAG_ANYBODY
| CTLTYPE_STRING
| CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, 0, sysctl_handle_kern_threadname
,"A","");
729 sysctl_sched_stats(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
731 host_basic_info_data_t hinfo
;
735 mach_msg_type_number_t count
= HOST_BASIC_INFO_COUNT
;
736 struct _processor_statistics_np
*buf
;
739 kret
= host_info((host_t
)BSD_HOST
, HOST_BASIC_INFO
, (host_info_t
)&hinfo
, &count
);
740 if (kret
!= KERN_SUCCESS
) {
744 size
= sizeof(struct _processor_statistics_np
) * (hinfo
.logical_cpu_max
+ 2); /* One for RT Queue, One for Fair Share Queue */
746 if (req
->oldlen
< size
) {
750 MALLOC(buf
, struct _processor_statistics_np
*, size
, M_TEMP
, M_ZERO
| M_WAITOK
);
752 kret
= get_sched_statistics(buf
, &size
);
753 if (kret
!= KERN_SUCCESS
) {
758 error
= sysctl_io_opaque(req
, buf
, size
, &changed
);
764 panic("Sched info changed?!");
771 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats
, CTLFLAG_LOCKED
, 0, 0, sysctl_sched_stats
, "-", "");
774 sysctl_sched_stats_enable(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, __unused
struct sysctl_req
*req
)
779 if (req
->newlen
!= sizeof(active
)) {
783 res
= copyin(req
->newptr
, &active
, sizeof(active
));
788 return set_sched_stats_active(active
);
791 SYSCTL_PROC(_kern
, OID_AUTO
, sched_stats_enable
, CTLFLAG_LOCKED
| CTLFLAG_WR
, 0, 0, sysctl_sched_stats_enable
, "-", "");
793 extern int get_kernel_symfile(proc_t
, char **);
796 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
799 extern int syscalls_log
[];
800 extern const char *syscallnames
[];
803 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
805 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
806 __unused
int *name
= arg1
; /* oid element argument vector */
807 __unused
int namelen
= arg2
; /* number of oid element arguments */
808 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
809 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
810 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
811 size_t newlen
= req
->newlen
; /* user buffer copy in size */
816 /* valid values passed in:
817 * = 0 means don't keep called counts for each bsd syscall
818 * > 0 means keep called counts for each bsd syscall
819 * = 2 means dump current counts to the system log
820 * = 3 means reset all counts
821 * for example, to dump current counts:
822 * sysctl -w kern.count_calls=2
824 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, &tmp
);
830 do_count_syscalls
= 1;
832 else if ( tmp
== 0 || tmp
== 2 || tmp
== 3 ) {
834 for ( i
= 0; i
< nsysent
; i
++ ) {
835 if ( syscalls_log
[i
] != 0 ) {
837 printf("%d calls - name %s \n", syscalls_log
[i
], syscallnames
[i
]);
845 do_count_syscalls
= 1;
849 /* adjust index so we return the right required/consumed amount */
851 req
->oldidx
+= req
->oldlen
;
855 SYSCTL_PROC(_kern
, KERN_COUNT_SYSCALLS
, count_syscalls
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
856 0, /* Pointer argument (arg1) */
857 0, /* Integer argument (arg2) */
858 sysctl_docountsyscalls
, /* Handler function */
859 NULL
, /* Data pointer */
861 #endif /* COUNT_SYSCALLS */
865 * Debugging related system variables.
869 #endif /* DIAGNOSTIC */
870 struct ctldebug debug0
, debug1
;
871 struct ctldebug debug2
, debug3
, debug4
;
872 struct ctldebug debug5
, debug6
, debug7
, debug8
, debug9
;
873 struct ctldebug debug10
, debug11
, debug12
, debug13
, debug14
;
874 struct ctldebug debug15
, debug16
, debug17
, debug18
, debug19
;
875 STATIC
struct ctldebug
*debugvars
[CTL_DEBUG_MAXID
] = {
876 &debug0
, &debug1
, &debug2
, &debug3
, &debug4
,
877 &debug5
, &debug6
, &debug7
, &debug8
, &debug9
,
878 &debug10
, &debug11
, &debug12
, &debug13
, &debug14
,
879 &debug15
, &debug16
, &debug17
, &debug18
, &debug19
,
882 sysctl_dodebug SYSCTL_HANDLER_ARGS
884 int cmd
= oidp
->oid_arg2
; /* subcommand*/
885 int *name
= arg1
; /* oid element argument vector */
886 int namelen
= arg2
; /* number of oid element arguments */
887 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
888 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
889 user_addr_t newp
= req
->newptr
; /* user buffer copy in address */
890 size_t newlen
= req
->newlen
; /* user buffer copy in size */
893 struct ctldebug
*cdp
;
895 /* all sysctl names at this level are name and field */
897 return (ENOTSUP
); /* overloaded */
898 if (cmd
< 0 || cmd
>= CTL_DEBUG_MAXID
)
900 cdp
= debugvars
[cmd
];
901 if (cdp
->debugname
== 0)
905 error
= sysctl_rdstring(oldp
, oldlenp
, newp
, cdp
->debugname
);
907 case CTL_DEBUG_VALUE
:
908 error
= sysctl_int(oldp
, oldlenp
, newp
, newlen
, cdp
->debugvar
);
915 /* adjust index so we return the right required/consumed amount */
917 req
->oldidx
+= req
->oldlen
;
922 * XXX We mark this RW instead of RD to let sysctl_rdstring() return the
923 * XXX historical error.
925 SYSCTL_PROC(_debug
, CTL_DEBUG_NAME
, name
, CTLTYPE_NODE
|CTLFLAG_RW
| CTLFLAG_LOCKED
,
926 0, /* Pointer argument (arg1) */
927 CTL_DEBUG_NAME
, /* Integer argument (arg2) */
928 sysctl_dodebug
, /* Handler function */
929 NULL
, /* Data pointer */
931 SYSCTL_PROC(_debug
, CTL_DEBUG_VALUE
, value
, CTLTYPE_NODE
|CTLFLAG_RW
| CTLFLAG_LOCKED
,
932 0, /* Pointer argument (arg1) */
933 CTL_DEBUG_VALUE
, /* Integer argument (arg2) */
934 sysctl_dodebug
, /* Handler function */
935 NULL
, /* Data pointer */
940 * The following sysctl_* functions should not be used
941 * any more, as they can only cope with callers in
942 * user mode: Use new-style
950 * Validate parameters and get old / set new parameters
951 * for an integer-valued sysctl function.
954 sysctl_int(user_addr_t oldp
, size_t *oldlenp
,
955 user_addr_t newp
, size_t newlen
, int *valp
)
959 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
961 if (oldp
&& *oldlenp
< sizeof(int))
963 if (newp
&& newlen
!= sizeof(int))
965 *oldlenp
= sizeof(int);
967 error
= copyout(valp
, oldp
, sizeof(int));
968 if (error
== 0 && newp
) {
969 error
= copyin(newp
, valp
, sizeof(int));
970 AUDIT_ARG(value32
, *valp
);
976 * As above, but read-only.
979 sysctl_rdint(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
, int val
)
983 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
985 if (oldp
&& *oldlenp
< sizeof(int))
989 *oldlenp
= sizeof(int);
991 error
= copyout((caddr_t
)&val
, oldp
, sizeof(int));
996 * Validate parameters and get old / set new parameters
997 * for an quad(64bit)-valued sysctl function.
1000 sysctl_quad(user_addr_t oldp
, size_t *oldlenp
,
1001 user_addr_t newp
, size_t newlen
, quad_t
*valp
)
1005 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1007 if (oldp
&& *oldlenp
< sizeof(quad_t
))
1009 if (newp
&& newlen
!= sizeof(quad_t
))
1011 *oldlenp
= sizeof(quad_t
);
1013 error
= copyout(valp
, oldp
, sizeof(quad_t
));
1014 if (error
== 0 && newp
)
1015 error
= copyin(newp
, valp
, sizeof(quad_t
));
1020 * As above, but read-only.
1023 sysctl_rdquad(user_addr_t oldp
, size_t *oldlenp
, user_addr_t newp
, quad_t val
)
1027 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1029 if (oldp
&& *oldlenp
< sizeof(quad_t
))
1033 *oldlenp
= sizeof(quad_t
);
1035 error
= copyout((caddr_t
)&val
, oldp
, sizeof(quad_t
));
1040 * Validate parameters and get old / set new parameters
1041 * for a string-valued sysctl function. Unlike sysctl_string, if you
1042 * give it a too small (but larger than 0 bytes) buffer, instead of
1043 * returning ENOMEM, it truncates the returned string to the buffer
1044 * size. This preserves the semantics of some library routines
1045 * implemented via sysctl, which truncate their returned data, rather
1046 * than simply returning an error. The returned string is always NUL
1050 sysctl_trstring(user_addr_t oldp
, size_t *oldlenp
,
1051 user_addr_t newp
, size_t newlen
, char *str
, int maxlen
)
1053 int len
, copylen
, error
= 0;
1055 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1057 copylen
= len
= strlen(str
) + 1;
1058 if (oldp
&& (len
< 0 || *oldlenp
< 1))
1060 if (oldp
&& (*oldlenp
< (size_t)len
))
1061 copylen
= *oldlenp
+ 1;
1062 if (newp
&& (maxlen
< 0 || newlen
>= (size_t)maxlen
))
1064 *oldlenp
= copylen
- 1; /* deal with NULL strings correctly */
1066 error
= copyout(str
, oldp
, copylen
);
1068 unsigned char c
= 0;
1071 error
= copyout((void *)&c
, oldp
, sizeof(char));
1074 if (error
== 0 && newp
) {
1075 error
= copyin(newp
, str
, newlen
);
1077 AUDIT_ARG(text
, (char *)str
);
1083 * Validate parameters and get old / set new parameters
1084 * for a string-valued sysctl function.
1087 sysctl_string(user_addr_t oldp
, size_t *oldlenp
,
1088 user_addr_t newp
, size_t newlen
, char *str
, int maxlen
)
1092 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1094 len
= strlen(str
) + 1;
1095 if (oldp
&& (len
< 0 || *oldlenp
< (size_t)len
))
1097 if (newp
&& (maxlen
< 0 || newlen
>= (size_t)maxlen
))
1099 *oldlenp
= len
-1; /* deal with NULL strings correctly */
1101 error
= copyout(str
, oldp
, len
);
1103 if (error
== 0 && newp
) {
1104 error
= copyin(newp
, str
, newlen
);
1106 AUDIT_ARG(text
, (char *)str
);
1112 * As above, but read-only.
1115 sysctl_rdstring(user_addr_t oldp
, size_t *oldlenp
,
1116 user_addr_t newp
, char *str
)
1120 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1122 len
= strlen(str
) + 1;
1123 if (oldp
&& *oldlenp
< (size_t)len
)
1129 error
= copyout(str
, oldp
, len
);
1134 * Validate parameters and get old / set new parameters
1135 * for a structure oriented sysctl function.
1138 sysctl_struct(user_addr_t oldp
, size_t *oldlenp
,
1139 user_addr_t newp
, size_t newlen
, void *sp
, int len
)
1143 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1145 if (oldp
&& (len
< 0 || *oldlenp
< (size_t)len
))
1147 if (newp
&& (len
< 0 || newlen
> (size_t)len
))
1151 error
= copyout(sp
, oldp
, len
);
1153 if (error
== 0 && newp
)
1154 error
= copyin(newp
, sp
, len
);
1159 * Validate parameters and get old parameters
1160 * for a structure oriented sysctl function.
1163 sysctl_rdstruct(user_addr_t oldp
, size_t *oldlenp
,
1164 user_addr_t newp
, void *sp
, int len
)
1168 if (oldp
!= USER_ADDR_NULL
&& oldlenp
== NULL
)
1170 if (oldp
&& (len
< 0 || *oldlenp
< (size_t)len
))
1176 error
= copyout(sp
, oldp
, len
);
1181 sysdoproc_filt_KERN_PROC_PID(proc_t p
, void * arg
)
1183 if (p
->p_pid
!= (pid_t
)*(int*)arg
)
1190 sysdoproc_filt_KERN_PROC_PGRP(proc_t p
, void * arg
)
1192 if (p
->p_pgrpid
!= (pid_t
)*(int*)arg
)
1199 sysdoproc_filt_KERN_PROC_TTY(proc_t p
, void * arg
)
1201 boolean_t funnel_state
;
1206 funnel_state
= thread_funnel_set(kernel_flock
, TRUE
);
1207 /* This is very racy but list lock is held.. Hmmm. */
1208 if ((p
->p_flag
& P_CONTROLT
) == 0 ||
1209 (p
->p_pgrp
== NULL
) || (p
->p_pgrp
->pg_session
== NULL
) ||
1210 (tp
= SESSION_TP(p
->p_pgrp
->pg_session
)) == TTY_NULL
||
1211 tp
->t_dev
!= (dev_t
)*(int*)arg
)
1216 thread_funnel_set(kernel_flock
, funnel_state
);
1222 sysdoproc_filt_KERN_PROC_UID(proc_t p
, void * arg
)
1224 kauth_cred_t my_cred
;
1227 if (p
->p_ucred
== NULL
)
1229 my_cred
= kauth_cred_proc_ref(p
);
1230 uid
= kauth_cred_getuid(my_cred
);
1231 kauth_cred_unref(&my_cred
);
1233 if (uid
!= (uid_t
)*(int*)arg
)
1241 sysdoproc_filt_KERN_PROC_RUID(proc_t p
, void * arg
)
1243 kauth_cred_t my_cred
;
1246 if (p
->p_ucred
== NULL
)
1248 my_cred
= kauth_cred_proc_ref(p
);
1249 ruid
= kauth_cred_getruid(my_cred
);
1250 kauth_cred_unref(&my_cred
);
1252 if (ruid
!= (uid_t
)*(int*)arg
)
1260 sysdoproc_filt_KERN_PROC_LCID(proc_t p
, void * arg
)
1262 if ((p
->p_lctx
== NULL
) ||
1263 (p
->p_lctx
->lc_id
!= (pid_t
)*(int*)arg
))
1271 * try over estimating by 5 procs
1273 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
1274 struct sysdoproc_args
{
1277 boolean_t is_64_bit
;
1289 sysdoproc_callback(proc_t p
, void *arg
)
1291 struct sysdoproc_args
*args
= arg
;
1293 if (args
->buflen
>= args
->sizeof_kproc
) {
1294 if ((args
->ruidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_RUID(p
, &args
->uidval
) == 0))
1295 return (PROC_RETURNED
);
1296 if ((args
->uidcheck
!= 0) && (sysdoproc_filt_KERN_PROC_UID(p
, &args
->uidval
) == 0))
1297 return (PROC_RETURNED
);
1298 if ((args
->ttycheck
!= 0) && (sysdoproc_filt_KERN_PROC_TTY(p
, &args
->uidval
) == 0))
1299 return (PROC_RETURNED
);
1301 bzero(args
->kprocp
, args
->sizeof_kproc
);
1302 if (args
->is_64_bit
)
1303 fill_user64_proc(p
, args
->kprocp
);
1305 fill_user32_proc(p
, args
->kprocp
);
1306 int error
= copyout(args
->kprocp
, args
->dp
, args
->sizeof_kproc
);
1308 *args
->errorp
= error
;
1309 return (PROC_RETURNED_DONE
);
1311 args
->dp
+= args
->sizeof_kproc
;
1312 args
->buflen
-= args
->sizeof_kproc
;
1314 args
->needed
+= args
->sizeof_kproc
;
1315 return (PROC_RETURNED
);
1318 SYSCTL_NODE(_kern
, KERN_PROC
, proc
, CTLFLAG_RD
| CTLFLAG_LOCKED
, 0, "");
1320 sysctl_prochandle SYSCTL_HANDLER_ARGS
1322 int cmd
= oidp
->oid_arg2
; /* subcommand for multiple nodes */
1323 int *name
= arg1
; /* oid element argument vector */
1324 int namelen
= arg2
; /* number of oid element arguments */
1325 user_addr_t where
= req
->oldptr
;/* user buffer copy out address */
1327 user_addr_t dp
= where
;
1329 int buflen
= where
!= USER_ADDR_NULL
? req
->oldlen
: 0;
1331 boolean_t is_64_bit
= proc_is64bit(current_proc());
1332 struct user32_kinfo_proc user32_kproc
;
1333 struct user64_kinfo_proc user_kproc
;
1336 int (*filterfn
)(proc_t
, void *) = 0;
1337 struct sysdoproc_args args
;
1342 if (namelen
!= 1 && !(namelen
== 0 && cmd
== KERN_PROC_ALL
))
1346 sizeof_kproc
= sizeof(user_kproc
);
1347 kprocp
= &user_kproc
;
1349 sizeof_kproc
= sizeof(user32_kproc
);
1350 kprocp
= &user32_kproc
;
1356 filterfn
= sysdoproc_filt_KERN_PROC_PID
;
1359 case KERN_PROC_PGRP
:
1360 filterfn
= sysdoproc_filt_KERN_PROC_PGRP
;
1371 case KERN_PROC_RUID
:
1376 case KERN_PROC_LCID
:
1377 filterfn
= sysdoproc_filt_KERN_PROC_LCID
;
1384 /* must be kern.proc.<unknown> */
1389 args
.buflen
= buflen
;
1390 args
.kprocp
= kprocp
;
1391 args
.is_64_bit
= is_64_bit
;
1393 args
.needed
= needed
;
1394 args
.errorp
= &error
;
1395 args
.uidcheck
= uidcheck
;
1396 args
.ruidcheck
= ruidcheck
;
1397 args
.ttycheck
= ttycheck
;
1398 args
.sizeof_kproc
= sizeof_kproc
;
1400 args
.uidval
= name
[0];
1402 proc_iterate((PROC_ALLPROCLIST
| PROC_ZOMBPROCLIST
),
1403 sysdoproc_callback
, &args
, filterfn
, name
);
1409 needed
= args
.needed
;
1411 if (where
!= USER_ADDR_NULL
) {
1412 req
->oldlen
= dp
- where
;
1413 if (needed
> req
->oldlen
)
1416 needed
+= KERN_PROCSLOP
;
1417 req
->oldlen
= needed
;
1419 /* adjust index so we return the right required/consumed amount */
1420 req
->oldidx
+= req
->oldlen
;
1425 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
1426 * in the sysctl declaration itself, which comes into the handler function
1427 * as 'oidp->oid_arg2'.
1429 * For these particular sysctls, since they have well known OIDs, we could
1430 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
1431 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
1432 * of a well known value with a common handler function. This is desirable,
1433 * because we want well known values to "go away" at some future date.
1435 * It should be noted that the value of '((int *)arg1)[1]' is used for many
1436 * an integer parameter to the subcommand for many of these sysctls; we'd
1437 * rather have used '((int *)arg1)[0]' for that, or even better, an element
1438 * in a structure passed in as the 'newp' argument to sysctlbyname(3),
1439 * and then use leaf-node permissions enforcement, but that would have
1440 * necessitated modifying user space code to correspond to the interface
1441 * change, and we are striving for binary backward compatibility here; even
1442 * though these are SPI, and not intended for use by user space applications
1443 * which are not themselves system tools or libraries, some applications
1444 * have erroneously used them.
1446 SYSCTL_PROC(_kern_proc
, KERN_PROC_ALL
, all
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1447 0, /* Pointer argument (arg1) */
1448 KERN_PROC_ALL
, /* Integer argument (arg2) */
1449 sysctl_prochandle
, /* Handler function */
1450 NULL
, /* Data is size variant on ILP32/LP64 */
1452 SYSCTL_PROC(_kern_proc
, KERN_PROC_PID
, pid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1453 0, /* Pointer argument (arg1) */
1454 KERN_PROC_PID
, /* Integer argument (arg2) */
1455 sysctl_prochandle
, /* Handler function */
1456 NULL
, /* Data is size variant on ILP32/LP64 */
1458 SYSCTL_PROC(_kern_proc
, KERN_PROC_TTY
, tty
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1459 0, /* Pointer argument (arg1) */
1460 KERN_PROC_TTY
, /* Integer argument (arg2) */
1461 sysctl_prochandle
, /* Handler function */
1462 NULL
, /* Data is size variant on ILP32/LP64 */
1464 SYSCTL_PROC(_kern_proc
, KERN_PROC_PGRP
, pgrp
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1465 0, /* Pointer argument (arg1) */
1466 KERN_PROC_PGRP
, /* Integer argument (arg2) */
1467 sysctl_prochandle
, /* Handler function */
1468 NULL
, /* Data is size variant on ILP32/LP64 */
1470 SYSCTL_PROC(_kern_proc
, KERN_PROC_UID
, uid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1471 0, /* Pointer argument (arg1) */
1472 KERN_PROC_UID
, /* Integer argument (arg2) */
1473 sysctl_prochandle
, /* Handler function */
1474 NULL
, /* Data is size variant on ILP32/LP64 */
1476 SYSCTL_PROC(_kern_proc
, KERN_PROC_RUID
, ruid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1477 0, /* Pointer argument (arg1) */
1478 KERN_PROC_RUID
, /* Integer argument (arg2) */
1479 sysctl_prochandle
, /* Handler function */
1480 NULL
, /* Data is size variant on ILP32/LP64 */
1482 SYSCTL_PROC(_kern_proc
, KERN_PROC_LCID
, lcid
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1483 0, /* Pointer argument (arg1) */
1484 KERN_PROC_LCID
, /* Integer argument (arg2) */
1485 sysctl_prochandle
, /* Handler function */
1486 NULL
, /* Data is size variant on ILP32/LP64 */
1491 * Fill in non-zero fields of an eproc structure for the specified process.
1494 fill_user32_eproc(proc_t p
, struct user32_eproc
*__restrict ep
)
1498 struct session
*sessp
;
1499 kauth_cred_t my_cred
;
1502 sessp
= proc_session(p
);
1504 if (pg
!= PGRP_NULL
) {
1505 ep
->e_pgid
= p
->p_pgrpid
;
1506 ep
->e_jobc
= pg
->pg_jobc
;
1507 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
1508 ep
->e_flag
= EPROC_CTTY
;
1512 ep
->e_lcid
= p
->p_lctx
->lc_id
;
1514 ep
->e_ppid
= p
->p_ppid
;
1516 my_cred
= kauth_cred_proc_ref(p
);
1518 /* A fake historical pcred */
1519 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
1520 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
1521 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
1522 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
1524 /* A fake historical *kauth_cred_t */
1525 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
1526 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
1527 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
1528 bcopy(posix_cred_get(my_cred
)->cr_groups
,
1529 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
1531 kauth_cred_unref(&my_cred
);
1534 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
1535 (tp
= SESSION_TP(sessp
))) {
1536 ep
->e_tdev
= tp
->t_dev
;
1537 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
1541 if (sessp
!= SESSION_NULL
) {
1542 if (SESS_LEADER(p
, sessp
))
1543 ep
->e_flag
|= EPROC_SLEADER
;
1544 session_rele(sessp
);
1546 if (pg
!= PGRP_NULL
)
1551 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
1554 fill_user64_eproc(proc_t p
, struct user64_eproc
*__restrict ep
)
1558 struct session
*sessp
;
1559 kauth_cred_t my_cred
;
1562 sessp
= proc_session(p
);
1564 if (pg
!= PGRP_NULL
) {
1565 ep
->e_pgid
= p
->p_pgrpid
;
1566 ep
->e_jobc
= pg
->pg_jobc
;
1567 if (sessp
!= SESSION_NULL
&& sessp
->s_ttyvp
)
1568 ep
->e_flag
= EPROC_CTTY
;
1572 ep
->e_lcid
= p
->p_lctx
->lc_id
;
1574 ep
->e_ppid
= p
->p_ppid
;
1576 my_cred
= kauth_cred_proc_ref(p
);
1578 /* A fake historical pcred */
1579 ep
->e_pcred
.p_ruid
= kauth_cred_getruid(my_cred
);
1580 ep
->e_pcred
.p_svuid
= kauth_cred_getsvuid(my_cred
);
1581 ep
->e_pcred
.p_rgid
= kauth_cred_getrgid(my_cred
);
1582 ep
->e_pcred
.p_svgid
= kauth_cred_getsvgid(my_cred
);
1584 /* A fake historical *kauth_cred_t */
1585 ep
->e_ucred
.cr_ref
= my_cred
->cr_ref
;
1586 ep
->e_ucred
.cr_uid
= kauth_cred_getuid(my_cred
);
1587 ep
->e_ucred
.cr_ngroups
= posix_cred_get(my_cred
)->cr_ngroups
;
1588 bcopy(posix_cred_get(my_cred
)->cr_groups
,
1589 ep
->e_ucred
.cr_groups
, NGROUPS
* sizeof (gid_t
));
1591 kauth_cred_unref(&my_cred
);
1594 if ((p
->p_flag
& P_CONTROLT
) && (sessp
!= SESSION_NULL
) &&
1595 (tp
= SESSION_TP(sessp
))) {
1596 ep
->e_tdev
= tp
->t_dev
;
1597 ep
->e_tpgid
= sessp
->s_ttypgrpid
;
1601 if (sessp
!= SESSION_NULL
) {
1602 if (SESS_LEADER(p
, sessp
))
1603 ep
->e_flag
|= EPROC_SLEADER
;
1604 session_rele(sessp
);
1606 if (pg
!= PGRP_NULL
)
1611 * Fill in an eproc structure for the specified process.
1612 * bzeroed by our caller, so only set non-zero fields.
1615 fill_user32_externproc(proc_t p
, struct user32_extern_proc
*__restrict exp
)
1617 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1618 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1619 exp
->p_flag
= p
->p_flag
;
1620 if (p
->p_lflag
& P_LTRACED
)
1621 exp
->p_flag
|= P_TRACED
;
1622 if (p
->p_lflag
& P_LPPWAIT
)
1623 exp
->p_flag
|= P_PPWAIT
;
1624 if (p
->p_lflag
& P_LEXIT
)
1625 exp
->p_flag
|= P_WEXIT
;
1626 exp
->p_stat
= p
->p_stat
;
1627 exp
->p_pid
= p
->p_pid
;
1628 exp
->p_oppid
= p
->p_oppid
;
1630 exp
->user_stack
= p
->user_stack
;
1631 exp
->p_debugger
= p
->p_debugger
;
1632 exp
->sigwait
= p
->sigwait
;
1634 #ifdef _PROC_HAS_SCHEDINFO_
1635 exp
->p_estcpu
= p
->p_estcpu
;
1636 exp
->p_pctcpu
= p
->p_pctcpu
;
1637 exp
->p_slptime
= p
->p_slptime
;
1639 exp
->p_realtimer
.it_interval
.tv_sec
=
1640 (user32_time_t
)p
->p_realtimer
.it_interval
.tv_sec
;
1641 exp
->p_realtimer
.it_interval
.tv_usec
=
1642 (__int32_t
)p
->p_realtimer
.it_interval
.tv_usec
;
1644 exp
->p_realtimer
.it_value
.tv_sec
=
1645 (user32_time_t
)p
->p_realtimer
.it_value
.tv_sec
;
1646 exp
->p_realtimer
.it_value
.tv_usec
=
1647 (__int32_t
)p
->p_realtimer
.it_value
.tv_usec
;
1649 exp
->p_rtime
.tv_sec
= (user32_time_t
)p
->p_rtime
.tv_sec
;
1650 exp
->p_rtime
.tv_usec
= (__int32_t
)p
->p_rtime
.tv_usec
;
1652 exp
->p_sigignore
= p
->p_sigignore
;
1653 exp
->p_sigcatch
= p
->p_sigcatch
;
1654 exp
->p_priority
= p
->p_priority
;
1655 exp
->p_nice
= p
->p_nice
;
1656 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1657 exp
->p_xstat
= p
->p_xstat
;
1658 exp
->p_acflag
= p
->p_acflag
;
1662 * Fill in an LP64 version of extern_proc structure for the specified process.
1665 fill_user64_externproc(proc_t p
, struct user64_extern_proc
*__restrict exp
)
1667 exp
->p_starttime
.tv_sec
= p
->p_start
.tv_sec
;
1668 exp
->p_starttime
.tv_usec
= p
->p_start
.tv_usec
;
1669 exp
->p_flag
= p
->p_flag
;
1670 if (p
->p_lflag
& P_LTRACED
)
1671 exp
->p_flag
|= P_TRACED
;
1672 if (p
->p_lflag
& P_LPPWAIT
)
1673 exp
->p_flag
|= P_PPWAIT
;
1674 if (p
->p_lflag
& P_LEXIT
)
1675 exp
->p_flag
|= P_WEXIT
;
1676 exp
->p_stat
= p
->p_stat
;
1677 exp
->p_pid
= p
->p_pid
;
1678 exp
->p_oppid
= p
->p_oppid
;
1680 exp
->user_stack
= p
->user_stack
;
1681 exp
->p_debugger
= p
->p_debugger
;
1682 exp
->sigwait
= p
->sigwait
;
1684 #ifdef _PROC_HAS_SCHEDINFO_
1685 exp
->p_estcpu
= p
->p_estcpu
;
1686 exp
->p_pctcpu
= p
->p_pctcpu
;
1687 exp
->p_slptime
= p
->p_slptime
;
1689 exp
->p_realtimer
.it_interval
.tv_sec
= p
->p_realtimer
.it_interval
.tv_sec
;
1690 exp
->p_realtimer
.it_interval
.tv_usec
= p
->p_realtimer
.it_interval
.tv_usec
;
1692 exp
->p_realtimer
.it_value
.tv_sec
= p
->p_realtimer
.it_value
.tv_sec
;
1693 exp
->p_realtimer
.it_value
.tv_usec
= p
->p_realtimer
.it_value
.tv_usec
;
1695 exp
->p_rtime
.tv_sec
= p
->p_rtime
.tv_sec
;
1696 exp
->p_rtime
.tv_usec
= p
->p_rtime
.tv_usec
;
1698 exp
->p_sigignore
= p
->p_sigignore
;
1699 exp
->p_sigcatch
= p
->p_sigcatch
;
1700 exp
->p_priority
= p
->p_priority
;
1701 exp
->p_nice
= p
->p_nice
;
1702 bcopy(&p
->p_comm
, &exp
->p_comm
, MAXCOMLEN
);
1703 exp
->p_xstat
= p
->p_xstat
;
1704 exp
->p_acflag
= p
->p_acflag
;
1708 fill_user32_proc(proc_t p
, struct user32_kinfo_proc
*__restrict kp
)
1710 /* on a 64 bit kernel, 32 bit users get some truncated information */
1711 fill_user32_externproc(p
, &kp
->kp_proc
);
1712 fill_user32_eproc(p
, &kp
->kp_eproc
);
1716 fill_user64_proc(proc_t p
, struct user64_kinfo_proc
*__restrict kp
)
1718 fill_user64_externproc(p
, &kp
->kp_proc
);
1719 fill_user64_eproc(p
, &kp
->kp_eproc
);
1723 sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
1725 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1726 int *name
= arg1
; /* oid element argument vector */
1727 int namelen
= arg2
; /* number of oid element arguments */
1728 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1729 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1730 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1731 // size_t newlen = req->newlen; /* user buffer copy in size */
1733 proc_t p
= current_proc();
1739 ret
= suser(kauth_cred_get(), &p
->p_acflag
);
1753 case KERN_KDWRITETR
:
1754 case KERN_KDWRITEMAP
:
1758 case KERN_KDSETRTCDEC
:
1760 case KERN_KDGETENTROPY
:
1761 case KERN_KDENABLE_BG_TRACE
:
1762 case KERN_KDDISABLE_BG_TRACE
:
1763 case KERN_KDSET_TYPEFILTER
:
1765 ret
= kdbg_control(name
, namelen
, oldp
, oldlenp
);
1772 /* adjust index so we return the right required/consumed amount */
1774 req
->oldidx
+= req
->oldlen
;
1778 SYSCTL_PROC(_kern
, KERN_KDEBUG
, kdebug
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1779 0, /* Pointer argument (arg1) */
1780 0, /* Integer argument (arg2) */
1781 sysctl_kdebug_ops
, /* Handler function */
1782 NULL
, /* Data pointer */
1786 #if !CONFIG_EMBEDDED
1788 * Return the top *sizep bytes of the user stack, or the entire area of the
1789 * user stack down through the saved exec_path, whichever is smaller.
1792 sysctl_doprocargs SYSCTL_HANDLER_ARGS
1794 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1795 int *name
= arg1
; /* oid element argument vector */
1796 int namelen
= arg2
; /* number of oid element arguments */
1797 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1798 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1799 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1800 // size_t newlen = req->newlen; /* user buffer copy in size */
1803 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 0);
1805 /* adjust index so we return the right required/consumed amount */
1807 req
->oldidx
+= req
->oldlen
;
1811 SYSCTL_PROC(_kern
, KERN_PROCARGS
, procargs
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1812 0, /* Pointer argument (arg1) */
1813 0, /* Integer argument (arg2) */
1814 sysctl_doprocargs
, /* Handler function */
1815 NULL
, /* Data pointer */
1817 #endif /* !CONFIG_EMBEDDED */
1820 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1822 __unused
int cmd
= oidp
->oid_arg2
; /* subcommand*/
1823 int *name
= arg1
; /* oid element argument vector */
1824 int namelen
= arg2
; /* number of oid element arguments */
1825 user_addr_t oldp
= req
->oldptr
; /* user buffer copy out address */
1826 size_t *oldlenp
= &req
->oldlen
; /* user buffer copy out size */
1827 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1828 // size_t newlen = req->newlen; /* user buffer copy in size */
1831 error
= sysctl_procargsx( name
, namelen
, oldp
, oldlenp
, current_proc(), 1);
1833 /* adjust index so we return the right required/consumed amount */
1835 req
->oldidx
+= req
->oldlen
;
1839 SYSCTL_PROC(_kern
, KERN_PROCARGS2
, procargs2
, CTLTYPE_NODE
|CTLFLAG_RD
| CTLFLAG_LOCKED
,
1840 0, /* Pointer argument (arg1) */
1841 0, /* Integer argument (arg2) */
1842 sysctl_doprocargs2
, /* Handler function */
1843 NULL
, /* Data pointer */
1847 sysctl_procargsx(int *name
, u_int namelen
, user_addr_t where
,
1848 size_t *sizep
, proc_t cur_proc
, int argc_yes
)
1851 int buflen
= where
!= USER_ADDR_NULL
? *sizep
: 0;
1853 struct _vm_map
*proc_map
;
1856 user_addr_t arg_addr
;
1861 vm_offset_t copy_start
, copy_end
;
1864 kauth_cred_t my_cred
;
1871 buflen
-= sizeof(int); /* reserve first word to return argc */
1873 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1874 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1875 /* is not NULL then the caller wants us to return the length needed to */
1876 /* hold the data we would return */
1877 if (where
!= USER_ADDR_NULL
&& (buflen
<= 0 || buflen
> ARG_MAX
)) {
1883 * Lookup process by pid
1892 * Copy the top N bytes of the stack.
1893 * On all machines we have so far, the stack grows
1896 * If the user expects no more than N bytes of
1897 * argument list, use that as a guess for the
1901 if (!p
->user_stack
) {
1906 if (where
== USER_ADDR_NULL
) {
1907 /* caller only wants to know length of proc args data */
1908 if (sizep
== NULL
) {
1913 size
= p
->p_argslen
;
1916 size
+= sizeof(int);
1920 * old PROCARGS will return the executable's path and plus some
1921 * extra space for work alignment and data tags
1923 size
+= PATH_MAX
+ (6 * sizeof(int));
1925 size
+= (size
& (sizeof(int) - 1)) ? (sizeof(int) - (size
& (sizeof(int) - 1))) : 0;
1930 my_cred
= kauth_cred_proc_ref(p
);
1931 uid
= kauth_cred_getuid(my_cred
);
1932 kauth_cred_unref(&my_cred
);
1934 if ((uid
!= kauth_cred_getuid(kauth_cred_get()))
1935 && suser(kauth_cred_get(), &cur_proc
->p_acflag
)) {
1940 if ((u_int
)arg_size
> p
->p_argslen
)
1941 arg_size
= round_page(p
->p_argslen
);
1943 arg_addr
= p
->user_stack
- arg_size
;
1947 * Before we can block (any VM code), make another
1948 * reference to the map to keep it alive. We do
1949 * that by getting a reference on the task itself.
1957 argslen
= p
->p_argslen
;
1959 * Once we have a task reference we can convert that into a
1960 * map reference, which we will use in the calls below. The
1961 * task/process may change its map after we take this reference
1962 * (see execve), but the worst that will happen then is a return
1963 * of stale info (which is always a possibility).
1965 task_reference(task
);
1967 proc_map
= get_task_map_reference(task
);
1968 task_deallocate(task
);
1970 if (proc_map
== NULL
)
1974 ret
= kmem_alloc(kernel_map
, ©_start
, round_page(arg_size
));
1975 if (ret
!= KERN_SUCCESS
) {
1976 vm_map_deallocate(proc_map
);
1980 copy_end
= round_page(copy_start
+ arg_size
);
1982 if( vm_map_copyin(proc_map
, (vm_map_address_t
)arg_addr
,
1983 (vm_map_size_t
)arg_size
, FALSE
, &tmp
) != KERN_SUCCESS
) {
1984 vm_map_deallocate(proc_map
);
1985 kmem_free(kernel_map
, copy_start
,
1986 round_page(arg_size
));
1991 * Now that we've done the copyin from the process'
1992 * map, we can release the reference to it.
1994 vm_map_deallocate(proc_map
);
1996 if( vm_map_copy_overwrite(kernel_map
,
1997 (vm_map_address_t
)copy_start
,
1998 tmp
, FALSE
) != KERN_SUCCESS
) {
1999 kmem_free(kernel_map
, copy_start
,
2000 round_page(arg_size
));
2004 if (arg_size
> argslen
) {
2005 data
= (caddr_t
) (copy_end
- argslen
);
2008 data
= (caddr_t
) (copy_end
- arg_size
);
2013 /* Put processes argc as the first word in the copyout buffer */
2014 suword(where
, p
->p_argc
);
2015 error
= copyout(data
, (where
+ sizeof(int)), size
);
2016 size
+= sizeof(int);
2018 error
= copyout(data
, where
, size
);
2021 * Make the old PROCARGS work to return the executable's path
2022 * But, only if there is enough space in the provided buffer
2024 * on entry: data [possibly] points to the beginning of the path
2026 * Note: we keep all pointers&sizes aligned to word boundaries
2028 if ( (! error
) && (buflen
> 0 && (u_int
)buflen
> argslen
) )
2030 int binPath_sz
, alignedBinPath_sz
= 0;
2031 int extraSpaceNeeded
, addThis
;
2032 user_addr_t placeHere
;
2033 char * str
= (char *) data
;
2036 /* Some apps are really bad about messing up their stacks
2037 So, we have to be extra careful about getting the length
2038 of the executing binary. If we encounter an error, we bail.
2041 /* Limit ourselves to PATH_MAX paths */
2042 if ( max_len
> PATH_MAX
) max_len
= PATH_MAX
;
2046 while ( (binPath_sz
< max_len
-1) && (*str
++ != 0) )
2049 /* If we have a NUL terminator, copy it, too */
2050 if (binPath_sz
< max_len
-1) binPath_sz
+= 1;
2052 /* Pre-Flight the space requirements */
2054 /* Account for the padding that fills out binPath to the next word */
2055 alignedBinPath_sz
+= (binPath_sz
& (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz
& (sizeof(int)-1))) : 0;
2057 placeHere
= where
+ size
;
2059 /* Account for the bytes needed to keep placeHere word aligned */
2060 addThis
= (placeHere
& (sizeof(int)-1)) ? (sizeof(int)-(placeHere
& (sizeof(int)-1))) : 0;
2062 /* Add up all the space that is needed */
2063 extraSpaceNeeded
= alignedBinPath_sz
+ addThis
+ binPath_sz
+ (4 * sizeof(int));
2065 /* is there is room to tack on argv[0]? */
2066 if ( (buflen
& ~(sizeof(int)-1)) >= ( argslen
+ extraSpaceNeeded
))
2068 placeHere
+= addThis
;
2069 suword(placeHere
, 0);
2070 placeHere
+= sizeof(int);
2071 suword(placeHere
, 0xBFFF0000);
2072 placeHere
+= sizeof(int);
2073 suword(placeHere
, 0);
2074 placeHere
+= sizeof(int);
2075 error
= copyout(data
, placeHere
, binPath_sz
);
2078 placeHere
+= binPath_sz
;
2079 suword(placeHere
, 0);
2080 size
+= extraSpaceNeeded
;
2086 if (copy_start
!= (vm_offset_t
) 0) {
2087 kmem_free(kernel_map
, copy_start
, copy_end
- copy_start
);
2093 if (where
!= USER_ADDR_NULL
)
2100 * Max number of concurrent aio requests
2104 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2106 int new_value
, changed
;
2107 int error
= sysctl_io_number(req
, aio_max_requests
, sizeof(int), &new_value
, &changed
);
2109 /* make sure the system-wide limit is greater than the per process limit */
2110 if (new_value
>= aio_max_requests_per_process
&& new_value
<= AIO_MAX_REQUESTS
)
2111 aio_max_requests
= new_value
;
2120 * Max number of concurrent aio requests per process
2124 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2126 int new_value
, changed
;
2127 int error
= sysctl_io_number(req
, aio_max_requests_per_process
, sizeof(int), &new_value
, &changed
);
2129 /* make sure per process limit is less than the system-wide limit */
2130 if (new_value
<= aio_max_requests
&& new_value
>= AIO_LISTIO_MAX
)
2131 aio_max_requests_per_process
= new_value
;
2140 * Max number of async IO worker threads
2144 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2146 int new_value
, changed
;
2147 int error
= sysctl_io_number(req
, aio_worker_threads
, sizeof(int), &new_value
, &changed
);
2149 /* we only allow an increase in the number of worker threads */
2150 if (new_value
> aio_worker_threads
) {
2151 _aio_create_worker_threads((new_value
- aio_worker_threads
));
2152 aio_worker_threads
= new_value
;
2162 * System-wide limit on the max number of processes
2166 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2168 int new_value
, changed
;
2169 int error
= sysctl_io_number(req
, maxproc
, sizeof(int), &new_value
, &changed
);
2171 AUDIT_ARG(value32
, new_value
);
2172 /* make sure the system-wide limit is less than the configured hard
2173 limit set at kernel compilation */
2174 if (new_value
<= hard_maxproc
&& new_value
> 0)
2175 maxproc
= new_value
;
2182 SYSCTL_STRING(_kern
, KERN_OSTYPE
, ostype
,
2183 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2185 SYSCTL_STRING(_kern
, KERN_OSRELEASE
, osrelease
,
2186 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
/* kern.osrevision: read-only constant BSD revision number. */
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, BSD, "");
2191 SYSCTL_STRING(_kern
, KERN_VERSION
, version
,
2192 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
/* kern.uuid: read-only kernel UUID string (backed by kernel_uuid[]). */
SYSCTL_STRING(_kern, OID_AUTO, uuid,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &kernel_uuid[0], 0, "");
/* Runtime state for kprintf syscall tracing (debug.kprint_syscall*). */
int debug_kprint_syscall = 0;                    /* nonzero enables tracing */
char debug_kprint_syscall_process[MAXCOMLEN+1];  /* optional process-name filter */

/* Thread safe: bits and string value are not used to reclaim state */
SYSCTL_INT (_debug, OID_AUTO, kprint_syscall,
    CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
    CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process,
    sizeof(debug_kprint_syscall_process),
    "name of process for kprintf syscall tracing");
2209 int debug_kprint_current_process(const char **namep
)
2211 struct proc
*p
= current_proc();
2217 if (debug_kprint_syscall_process
[0]) {
2218 /* user asked to scope tracing to a particular process name */
2219 if(0 == strncmp(debug_kprint_syscall_process
,
2220 p
->p_comm
, sizeof(debug_kprint_syscall_process
))) {
2221 /* no value in telling the user that we traced what they asked */
2222 if(namep
) *namep
= NULL
;
2230 /* trace all processes. Tell user what we traced */
2239 /* PR-5293665: need to use a callback function for kern.osversion to set
2240 * osversion in IORegistry */
2243 sysctl_osversion(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
, struct sysctl_req
*req
)
2247 rval
= sysctl_handle_string(oidp
, arg1
, arg2
, req
);
2250 IORegistrySetOSBuildVersion((char *)arg1
);
/* kern.osversion: read-write build-version string; routed through the
 * sysctl_osversion handler (which also updates the IORegistry copy). */
SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
    osversion, 256 /* OSVERSIZE*/,
    sysctl_osversion, "A", "");
2262 sysctl_sysctl_bootargs
2263 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2268 strlcpy(buf
, PE_boot_args(), 256);
2269 error
= sysctl_io_string(req
, buf
, 256, 0, NULL
);
2273 SYSCTL_PROC(_kern
, OID_AUTO
, bootargs
,
2274 CTLFLAG_LOCKED
| CTLFLAG_RD
| CTLFLAG_KERN
| CTLTYPE_STRING
,
2276 sysctl_sysctl_bootargs
, "A", "bootargs");
2278 SYSCTL_INT(_kern
, KERN_MAXFILES
, maxfiles
,
2279 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
/* Read-only POSIX/BSD configuration constants (no backing variable;
 * the constant is supplied as the macro's value argument). */
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, 1, "");
2293 #if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
2294 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
2295 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2296 (int *)NULL
, 1, "");
2298 SYSCTL_INT(_kern
, KERN_SAVED_IDS
, saved_ids
,
2299 CTLFLAG_RD
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2302 SYSCTL_INT(_kern
, OID_AUTO
, num_files
,
2303 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2305 SYSCTL_COMPAT_INT(_kern
, OID_AUTO
, num_vnodes
,
2306 CTLFLAG_RD
| CTLFLAG_LOCKED
,
2308 SYSCTL_INT(_kern
, OID_AUTO
, num_tasks
,
2309 CTLFLAG_RD
| CTLFLAG_LOCKED
,
/* Read-only thread limits exported from Mach (kern.num_threads,
 * kern.num_taskthreads). */
SYSCTL_INT(_kern, OID_AUTO, num_threads,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &thread_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &task_threadmax, 0, "");
2319 sysctl_maxvnodes (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2321 int oldval
= desiredvnodes
;
2322 int error
= sysctl_io_number(req
, desiredvnodes
, sizeof(int), &desiredvnodes
, NULL
);
2324 if (oldval
!= desiredvnodes
) {
2325 reset_vmobjectcache(oldval
, desiredvnodes
);
2326 resize_namecache(desiredvnodes
);
/* debug switch to disable the name cache entirely */
SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &nc_disabled, 0, "");

/* Tunables with validation logic: each is routed through its handler
 * function rather than writing the variable directly. */
SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_maxvnodes, "I", "");

SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aiothreads, "I", "");
2358 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2360 int new_value
, changed
;
2361 int error
= sysctl_io_number(req
, securelevel
, sizeof(int), &new_value
, &changed
);
2363 if (!(new_value
< securelevel
&& req
->p
->p_pid
!= 1)) {
2365 securelevel
= new_value
;
/* kern.securelevel: writable, but lowering is restricted by the
 * sysctl_securelvl handler. */
SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_securelvl, "I", "");
2381 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2384 error
= sysctl_io_string(req
, domainname
, sizeof(domainname
), 0, &changed
);
2386 domainnamelen
= strlen(domainname
);
/* kern.nisdomainname: handled by sysctl_domainname, which also keeps
 * domainnamelen in sync on writes. */
SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_domainname, "A", "");
2395 SYSCTL_COMPAT_INT(_kern
, KERN_HOSTID
, hostid
,
2396 CTLFLAG_RW
| CTLFLAG_KERN
| CTLFLAG_LOCKED
,
2401 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2404 error
= sysctl_io_string(req
, hostname
, sizeof(hostname
), 1, &changed
);
2406 hostnamelen
= req
->newlen
;
/* kern.hostname: handled by sysctl_hostname, which also keeps
 * hostnamelen in sync on writes. */
SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_hostname, "A", "");
2418 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2420 /* Original code allowed writing, I'm copying this, although this all makes
2421 no sense to me. Besides, this sysctl is never used. */
2422 return sysctl_io_string(req
, &req
->p
->p_name
[0], (2*MAXCOMLEN
+1), 1, NULL
);
/* kern.procname: per-process name; ANYBODY => writable without privilege. */
SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_procname, "A", "");

SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_reads_disabled, 0, "");

SYSCTL_INT(_kern, OID_AUTO, ignore_is_ssd,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &ignore_is_ssd, 0, "");

/* Read/write VM cluster and page-queue tunables; each OID is backed
 * directly by the variable of the same name. */
SYSCTL_UINT(_kern, OID_AUTO, preheat_pages_max,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_pages_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_pages_min,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_pages_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max_iosize, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_target, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_reserved, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_speculative_percentage, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_speculative_q_age_ms, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_delayed_work_limit, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_batch, 0, "");
2484 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2486 time_t tv_sec
= boottime_sec();
2487 struct proc
*p
= req
->p
;
2489 if (proc_is64bit(p
)) {
2490 struct user64_timeval t
;
2493 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
2495 struct user32_timeval t
;
2498 return sysctl_io_opaque(req
, &t
, sizeof(t
), NULL
);
/* kern.boottime: read-only struct timeval, sized for the caller's ABI
 * by the sysctl_boottime handler. */
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_boottime, "S,timeval", "");
2508 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2511 int error
= get_kernel_symfile(req
->p
, &str
);
2514 return sysctl_io_string(req
, str
, 0, 0, NULL
);
/* kern.symfile: read-only path of the kernel symbol file. */
SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_symfile, "A", "");
2525 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2527 return sysctl_io_number(req
, netboot_root(), sizeof(int), NULL
, NULL
);
/* kern.netboot: read-only flag reported by netboot_root(). */
SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_netboot, "I", "");
2535 #ifdef CONFIG_IMGSRC_ACCESS
2537 * Legacy--act as if only one layer of nesting is possible.
2541 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2543 vfs_context_t ctx
= vfs_context_current();
2547 if (!vfs_context_issuser(ctx
)) {
2551 if (imgsrc_rootvnodes
[0] == NULL
) {
2555 result
= vnode_getwithref(imgsrc_rootvnodes
[0]);
2560 devvp
= vnode_mount(imgsrc_rootvnodes
[0])->mnt_devvp
;
2561 result
= vnode_getwithref(devvp
);
2566 result
= sysctl_io_number(req
, vnode_specrdev(devvp
), sizeof(dev_t
), NULL
, NULL
);
2570 vnode_put(imgsrc_rootvnodes
[0]);
/* kern.imgsrcdev: legacy read-only dev_t of the first image-boot root
 * (superuser only; enforced in the handler). */
SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcdev, "I", "");
2580 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2583 struct imgsrc_info info
[MAX_IMAGEBOOT_NESTING
]; /* 2 for now, no problem */
2587 if (imgsrc_rootvnodes
[0] == NULLVP
) {
2591 for (i
= 0; i
< MAX_IMAGEBOOT_NESTING
; i
++) {
2593 * Go get the root vnode.
2595 rvp
= imgsrc_rootvnodes
[i
];
2596 if (rvp
== NULLVP
) {
2600 error
= vnode_get(rvp
);
2606 * For now, no getting at a non-local volume.
2608 devvp
= vnode_mount(rvp
)->mnt_devvp
;
2609 if (devvp
== NULL
) {
2614 error
= vnode_getwithref(devvp
);
2623 info
[i
].ii_dev
= vnode_specrdev(devvp
);
2624 info
[i
].ii_flags
= 0;
2625 info
[i
].ii_height
= i
;
2626 bzero(info
[i
].ii_reserved
, sizeof(info
[i
].ii_reserved
));
2632 return sysctl_io_opaque(req
, info
, i
* sizeof(info
[0]), NULL
);
/* kern.imgsrcinfo: read-only array of struct imgsrc_info describing the
 * image-boot nesting layers. */
SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcinfo, "I", "");
2639 #endif /* CONFIG_IMGSRC_ACCESS */
/* kern.timer_coalescing_enabled: toggles Mach timer coalescing. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalescing_enabled,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &mach_timer_coalescing_enabled, 0, "");
2647 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2649 return sysctl_io_number(req
, (int)req
->p
->user_stack
, sizeof(int), NULL
, NULL
);
/* kern.usrstack: 32-bit view of the calling process's user stack address. */
SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack, "I", "");
2658 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2660 return sysctl_io_number(req
, req
->p
->user_stack
, sizeof(req
->p
->user_stack
), NULL
, NULL
);
/* kern.usrstack64: 64-bit view of the calling process's user stack address. */
SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack64, "Q", "");

/* kern.corefile: template path for core dumps, backed by corefilename[]. */
SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    corefilename, sizeof(corefilename), "");
2673 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2675 #ifdef SECURE_KERNEL
2678 int new_value
, changed
;
2679 int error
= sysctl_io_number(req
, do_coredump
, sizeof(int), &new_value
, &changed
);
2681 if ((new_value
== 0) || (new_value
== 1))
2682 do_coredump
= new_value
;
/* kern.coredump: 0/1 toggle, validated in sysctl_coredump
 * (forced off on SECURE_KERNEL builds). */
SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_coredump, "I", "");
2694 sysctl_suid_coredump
2695 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2697 #ifdef SECURE_KERNEL
2700 int new_value
, changed
;
2701 int error
= sysctl_io_number(req
, sugid_coredump
, sizeof(int), &new_value
, &changed
);
2703 if ((new_value
== 0) || (new_value
== 1))
2704 sugid_coredump
= new_value
;
/* kern.sugid_coredump: 0/1 toggle for dumping set[ug]id processes,
 * validated in sysctl_suid_coredump. */
SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_suid_coredump, "I", "");
2717 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2719 struct proc
*p
= req
->p
;
2720 int new_value
, changed
;
2721 int error
= sysctl_io_number(req
, (req
->p
->p_lflag
& P_LDELAYTERM
)? 1: 0, sizeof(int), &new_value
, &changed
);
2725 req
->p
->p_lflag
|= P_LDELAYTERM
;
2727 req
->p
->p_lflag
&= ~P_LDELAYTERM
;
/* kern.delayterm: per-process P_LDELAYTERM flag, toggled via
 * sysctl_delayterm on the calling process. */
SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_delayterm, "I", "");
2740 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2742 struct proc
*p
= req
->p
;
2744 int new_value
, old_value
, changed
;
2747 ut
= get_bsdthread_info(current_thread());
2749 if (ut
->uu_flag
& UT_RAGE_VNODES
)
2750 old_value
= KERN_RAGE_THREAD
;
2751 else if (p
->p_lflag
& P_LRAGE_VNODES
)
2752 old_value
= KERN_RAGE_PROC
;
2756 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2759 switch (new_value
) {
2760 case KERN_RAGE_PROC
:
2762 p
->p_lflag
|= P_LRAGE_VNODES
;
2765 case KERN_UNRAGE_PROC
:
2767 p
->p_lflag
&= ~P_LRAGE_VNODES
;
2771 case KERN_RAGE_THREAD
:
2772 ut
->uu_flag
|= UT_RAGE_VNODES
;
2774 case KERN_UNRAGE_THREAD
:
2775 ut
= get_bsdthread_info(current_thread());
2776 ut
->uu_flag
&= ~UT_RAGE_VNODES
;
/* kern.rage_vnode: per-process/per-thread rapid-aging vnode flags;
 * ANYBODY => no privilege required (handler scopes changes to caller). */
SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_rage_vnode, "I", "");
2787 /* XXX move this interface into libproc and remove this sysctl */
2789 sysctl_setthread_cpupercent
2790 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2792 int new_value
, old_value
;
2794 kern_return_t kret
= KERN_SUCCESS
;
2795 uint8_t percent
= 0;
2800 if ((error
= sysctl_io_number(req
, old_value
, sizeof(old_value
), &new_value
, NULL
)) != 0)
2803 percent
= new_value
& 0xff; /* low 8 bytes for perent */
2804 ms_refill
= (new_value
>> 8) & 0xffffff; /* upper 24bytes represent ms refill value */
2809 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
2811 if ((kret
= thread_set_cpulimit(THREAD_CPULIMIT_BLOCK
, percent
, ms_refill
* NSEC_PER_MSEC
)) != 0)
/* kern.setthread_cpupercent: sets a CPU-usage limit on the calling thread.
 * NOTE(review): unlike sibling OIDs this one lacks CTLFLAG_LOCKED — confirm
 * whether that is intentional. */
SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY,
    0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
2823 sysctl_kern_check_openevt
2824 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2826 struct proc
*p
= req
->p
;
2827 int new_value
, old_value
, changed
;
2830 if (p
->p_flag
& P_CHECKOPENEVT
) {
2831 old_value
= KERN_OPENEVT_PROC
;
2836 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, &changed
);
2839 switch (new_value
) {
2840 case KERN_OPENEVT_PROC
:
2841 OSBitOrAtomic(P_CHECKOPENEVT
, &p
->p_flag
);
2844 case KERN_UNOPENEVT_PROC
:
2845 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT
), &p
->p_flag
);
/* kern.check_openevt: per-process P_CHECKOPENEVT flag (open-event checking),
 * toggled atomically by the handler on the calling process. */
SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2862 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2864 #ifdef SECURE_KERNEL
2867 int new_value
, changed
;
2870 error
= sysctl_io_number(req
, nx_enabled
, sizeof(nx_enabled
), &new_value
, &changed
);
2875 #if defined(__i386__) || defined(__x86_64__)
2877 * Only allow setting if NX is supported on the chip
2879 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD
))
2882 nx_enabled
= new_value
;
/* kern.nx: NX (no-execute) protection toggle; handler rejects changes
 * when the CPU lacks XD support or on SECURE_KERNEL builds. */
SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_nx, "I", "");
2895 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2897 if (proc_is64bit(req
->p
)) {
2898 struct user64_loadavg loadinfo64
;
2899 fill_loadavg64(&averunnable
, &loadinfo64
);
2900 return sysctl_io_opaque(req
, &loadinfo64
, sizeof(loadinfo64
), NULL
);
2902 struct user32_loadavg loadinfo32
;
2903 fill_loadavg32(&averunnable
, &loadinfo32
);
2904 return sysctl_io_opaque(req
, &loadinfo32
, sizeof(loadinfo32
), NULL
);
/* vm.loadavg: read-only struct loadavg, sized for the caller's ABI
 * by the sysctl_loadavg handler. */
SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_loadavg, "S,loadavg", "");
2913 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2916 sysctl_vm_toggle_address_reuse(__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
,
2917 __unused
int arg2
, struct sysctl_req
*req
)
2919 int old_value
=0, new_value
=0, error
=0;
2921 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE
, &old_value
))
2923 error
= sysctl_io_number(req
, old_value
, sizeof(int), &new_value
, NULL
);
2925 return (vm_toggle_entry_reuse(new_value
, NULL
));
/* debug.toggle_address_reuse: front-end for vm_toggle_entry_reuse();
 * thread safe per the vm_map_lock note above its handler. */
SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse,
    CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_vm_toggle_address_reuse, "I", "");
2934 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
2937 uint64_t swap_total
;
2938 uint64_t swap_avail
;
2939 vm_size_t swap_pagesize
;
2940 boolean_t swap_encrypted
;
2941 struct xsw_usage xsu
;
2943 error
= macx_swapinfo(&swap_total
,
2950 xsu
.xsu_total
= swap_total
;
2951 xsu
.xsu_avail
= swap_avail
;
2952 xsu
.xsu_used
= swap_total
- swap_avail
;
2953 xsu
.xsu_pagesize
= swap_pagesize
;
2954 xsu
.xsu_encrypted
= swap_encrypted
;
2955 return sysctl_io_opaque(req
, &xsu
, sizeof(xsu
), NULL
);
/* vm.swapusage: read-only struct xsw_usage filled from macx_swapinfo(). */
SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_swapusage, "S,xsw_usage", "");
2965 extern void vm_page_reactivate_all_throttled(void);
2968 sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
2970 #pragma unused(arg1, arg2)
2971 int error
, val
= memorystatus_freeze_enabled
? 1 : 0;
2974 error
= sysctl_handle_int(oidp
, &val
, 0, req
);
2975 if (error
|| !req
->newptr
)
2979 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
2981 disabled
= (!val
&& memorystatus_freeze_enabled
);
2983 memorystatus_freeze_enabled
= val
? TRUE
: FALSE
;
2986 vm_page_reactivate_all_throttled();
/* vm.freeze_enabled: toggles memorystatus freezing; the handler also
 * reactivates throttled pages when freezing is disabled. */
SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled,
    CTLTYPE_INT | CTLFLAG_RW,
    &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
2993 #endif /* CONFIG_FREEZE */
/* this kernel does NOT implement shared_region_make_private_np() */
/* kern.shreg_privatizable: read-only constant 0. */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    (int *)NULL, 0, "");
3000 #if defined(__i386__) || defined(__x86_64__)
3002 sysctl_sysctl_exec_affinity(__unused
struct sysctl_oid
*oidp
,
3003 __unused
void *arg1
, __unused
int arg2
,
3004 struct sysctl_req
*req
)
3006 proc_t cur_proc
= req
->p
;
3009 if (req
->oldptr
!= USER_ADDR_NULL
) {
3010 cpu_type_t oldcputype
= (cur_proc
->p_flag
& P_AFFINITY
) ? CPU_TYPE_POWERPC
: CPU_TYPE_I386
;
3011 if ((error
= SYSCTL_OUT(req
, &oldcputype
, sizeof(oldcputype
))))
3015 if (req
->newptr
!= USER_ADDR_NULL
) {
3016 cpu_type_t newcputype
;
3017 if ((error
= SYSCTL_IN(req
, &newcputype
, sizeof(newcputype
))))
3019 if (newcputype
== CPU_TYPE_I386
)
3020 OSBitAndAtomic(~((uint32_t)P_AFFINITY
), &cur_proc
->p_flag
);
3021 else if (newcputype
== CPU_TYPE_POWERPC
)
3022 OSBitOrAtomic(P_AFFINITY
, &cur_proc
->p_flag
);
/* sysctl.proc_exec_affinity: get/set the calling process's P_AFFINITY
 * exec preference (reported as a cpu_type_t by the handler). */
SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_sysctl_exec_affinity, "I", "proc_exec_affinity");
3033 fetch_process_cputype(
3037 cpu_type_t
*cputype
)
3039 proc_t p
= PROC_NULL
;
3046 else if (namelen
== 1) {
3047 p
= proc_find(name
[0]);
3056 #if defined(__i386__) || defined(__x86_64__)
3057 if (p
->p_flag
& P_TRANSLATED
) {
3058 ret
= CPU_TYPE_POWERPC
;
3064 if (IS_64BIT_PROCESS(p
))
3065 ret
|= CPU_ARCH_ABI64
;
3076 sysctl_sysctl_native(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
3077 struct sysctl_req
*req
)
3080 cpu_type_t proc_cputype
= 0;
3081 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
3084 if ((proc_cputype
& ~CPU_ARCH_MASK
) != (cpu_type() & ~CPU_ARCH_MASK
))
3086 return SYSCTL_OUT(req
, &res
, sizeof(res
));
/* sysctl.proc_native: reports whether a process runs the native CPU type. */
SYSCTL_PROC(_sysctl, OID_AUTO, proc_native,
    CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_sysctl_native, "I", "proc_native");
3091 sysctl_sysctl_cputype(__unused
struct sysctl_oid
*oidp
, void *arg1
, int arg2
,
3092 struct sysctl_req
*req
)
3095 cpu_type_t proc_cputype
= 0;
3096 if ((error
= fetch_process_cputype(req
->p
, (int *)arg1
, arg2
, &proc_cputype
)) != 0)
3098 return SYSCTL_OUT(req
, &proc_cputype
, sizeof(proc_cputype
));
/* sysctl.proc_cputype: reports a process's cpu_type_t (see
 * fetch_process_cputype). */
SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype,
    CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_sysctl_cputype, "I", "proc_cputype");
3104 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3106 return sysctl_io_number(req
, boothowto
& RB_SAFEBOOT
? 1 : 0, sizeof(int), NULL
, NULL
);
/* kern.safeboot: read-only flag derived from boothowto & RB_SAFEBOOT. */
SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_safeboot, "I", "");
3115 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3117 return sysctl_io_number(req
, boothowto
& RB_SINGLE
? 1 : 0, sizeof(int), NULL
, NULL
);
/* kern.singleuser: read-only flag derived from boothowto & RB_SINGLE. */
SYSCTL_PROC(_kern, OID_AUTO, singleuser,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_singleuser, "I", "");
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t affinity_sets_enabled;
extern int affinity_sets_mapping;

/* NOTE(review): the (int *) cast papers over boolean_t vs int — relies on
 * them having the same size/representation. */
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
    CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
    CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
3136 * Boolean indicating if KASLR is active.
3140 (__unused
struct sysctl_oid
*oidp
, __unused
void *arg1
, __unused
int arg2
, struct sysctl_req
*req
)
3144 slide
= vm_kernel_slide
? 1 : 0;
3146 return sysctl_io_number( req
, slide
, sizeof(int), NULL
, NULL
);
/* kern.slide: read-only boolean — 1 if KASLR is active (vm_kernel_slide
 * nonzero), 0 otherwise. */
SYSCTL_PROC(_kern, OID_AUTO, slide,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_slide, "I", "");
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all
 * processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory. This puts
 * a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based
 * on the available physical memory in [truncated in extraction — see
 * original source for the rest of this sentence].
 *
 * All values are in bytes.
 */

vm_map_size_t vm_global_no_user_wire_amount;
vm_map_size_t vm_global_user_wire_limit;
vm_map_size_t vm_user_wire_limit;

/* There needs to be a more automatic/elegant way to do this */
SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount,
    CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit,
    CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit,
    CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
/* Read-only counters — presumably instrumentation from the VM map copy
 * path (osfmk); confirm against vm_map_copy_overwrite_aligned(). */
extern int vm_map_copy_overwrite_aligned_src_not_internal;
extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
extern int vm_map_copy_overwrite_aligned_src_large;
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal,
    CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric,
    CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large,
    CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");
/*
 * enable back trace events for thread blocks
 */
extern uint32_t kdebug_thread_block;

SYSCTL_INT (_kern, OID_AUTO, kdebug_thread_block,
    CTLFLAG_RW | CTLFLAG_LOCKED, &kdebug_thread_block, 0, "kdebug thread_block");
/*
 * Kernel stack size and depth
 */
SYSCTL_INT (_kern, OID_AUTO, stack_size,
    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0,
    "Max kernel stack depth at interrupt or context switch");
/*
 * enable back trace for port allocations
 */
extern int ipc_portbt;

SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &ipc_portbt, 0, "");
/*
 * See osfmk/kern/sched_prim.c for the corresponding definition
 * in osfmk/. If either version changes, update the other.
 */
#define SCHED_STRING_MAX_LENGTH (48)

extern char sched_string[SCHED_STRING_MAX_LENGTH];
/* kern.sched: read-only name of the active scheduler implementation. */
SYSCTL_STRING(_kern, OID_AUTO, sched,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    sched_string, sizeof(sched_string),
    "Timeshare scheduler implementation");
/*
 * Only support runtime modification on embedded platforms
 * with development config enabled
 */
extern int precise_user_kernel_time;
SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time");