1c79356b 1/*
316670eb 2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
5d5c5d0d 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
2d21ac55
A
66/*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
1c79356b
A
72
73/*
2d21ac55
A
74 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
1c79356b
A
84 */
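/*
 * Illustrative sketch only (not part of the original source): the shape of a
 * new-style definition as recommended above.  "example_tunable" is a made-up
 * name; real declarations live in the source file that owns the variable.
 */
#if 0 /* example */
static int example_tunable = 0;
SYSCTL_INT(_kern, OID_AUTO, example_tunable,
	CTLFLAG_RW | CTLFLAG_LOCKED,
	&example_tunable, 0, "illustrative new-style sysctl");
#endif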
85
86#include <sys/param.h>
87#include <sys/systm.h>
88#include <sys/kernel.h>
89#include <sys/malloc.h>
91447636
A
90#include <sys/proc_internal.h>
91#include <sys/kauth.h>
92#include <sys/file_internal.h>
93#include <sys/vnode_internal.h>
1c79356b
A
94#include <sys/unistd.h>
95#include <sys/buf.h>
96#include <sys/ioctl.h>
55e303ae 97#include <sys/namei.h>
1c79356b
A
98#include <sys/tty.h>
99#include <sys/disklabel.h>
100#include <sys/vm.h>
101#include <sys/sysctl.h>
9bccf70c 102#include <sys/user.h>
55e303ae 103#include <sys/aio_kern.h>
2d21ac55 104#include <sys/reboot.h>
3e170ce0
A
105#include <sys/memory_maintenance.h>
106#include <sys/priv.h>
5ba3f43e 107#include <stdatomic.h>
e5568f75 108
b0d623f7
A
109#include <security/audit/audit.h>
110#include <kern/kalloc.h>
e5568f75 111
5ba3f43e 112#include <machine/smp.h>
1c79356b 113#include <mach/machine.h>
6d2010ae 114#include <mach/mach_host.h>
1c79356b 115#include <mach/mach_types.h>
5ba3f43e 116#include <mach/processor_info.h>
1c79356b 117#include <mach/vm_param.h>
5ba3f43e 118#include <kern/debug.h>
b0d623f7 119#include <kern/mach_param.h>
1c79356b 120#include <kern/task.h>
316670eb 121#include <kern/thread.h>
5ba3f43e 122#include <kern/thread_group.h>
6d2010ae 123#include <kern/processor.h>
5ba3f43e 124#include <kern/cpu_number.h>
b0d623f7 125#include <kern/debug.h>
3e170ce0 126#include <kern/sched_prim.h>
1c79356b 127#include <vm/vm_kern.h>
91447636 128#include <vm/vm_map.h>
1c79356b
A
129#include <mach/host_info.h>
130
91447636 131#include <sys/mount_internal.h>
1c79356b
A
132#include <sys/kdebug.h>
133
134#include <IOKit/IOPlatformExpert.h>
135#include <pexpert/pexpert.h>
136
55e303ae 137#include <machine/machine_routines.h>
0c530ab8 138#include <machine/exec.h>
1c79356b 139
91447636 140#include <vm/vm_protos.h>
39236c6e 141#include <vm/vm_pageout.h>
39037602 142#include <vm/vm_compressor_algorithms.h>
6d2010ae 143#include <sys/imgsrc.h>
fe8ab488 144#include <kern/timer_call.h>
91447636 145
b0d623f7 146#if defined(__i386__) || defined(__x86_64__)
0c530ab8
A
147#include <i386/cpuid.h>
148#endif
149
316670eb
A
150#if CONFIG_FREEZE
151#include <sys/kern_memorystatus.h>
152#endif
153
39236c6e
A
154#if KPERF
155#include <kperf/kperf.h>
156#endif
157
fe8ab488
A
158#if HYPERVISOR
159#include <kern/hv_support.h>
160#endif
161
316670eb
A
162/*
163 * deliberately setting max requests to a really high number
164 * so that runaway settings do not cause MALLOC overflows
165 */
166#define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
167
55e303ae
A
168extern int aio_max_requests;
169extern int aio_max_requests_per_process;
170extern int aio_worker_threads;
91447636
A
171extern int lowpri_IO_window_msecs;
172extern int lowpri_IO_delay_msecs;
0c530ab8 173extern int nx_enabled;
2d21ac55 174extern int speculative_reads_disabled;
6d2010ae 175extern unsigned int speculative_prefetch_max;
316670eb 176extern unsigned int speculative_prefetch_max_iosize;
fe8ab488
A
177extern unsigned int preheat_max_bytes;
178extern unsigned int preheat_min_bytes;
b0d623f7 179extern long numvnodes;
1c79356b 180
39236c6e
A
181extern uuid_string_t bootsessionuuid_string;
182
6d2010ae
A
183extern unsigned int vm_max_delayed_work_limit;
184extern unsigned int vm_max_batch;
185
186extern unsigned int vm_page_free_min;
187extern unsigned int vm_page_free_target;
188extern unsigned int vm_page_free_reserved;
189extern unsigned int vm_page_speculative_percentage;
190extern unsigned int vm_page_speculative_q_age_ms;
191
04b8595b
A
192#if (DEVELOPMENT || DEBUG)
193extern uint32_t vm_page_creation_throttled_hard;
194extern uint32_t vm_page_creation_throttled_soft;
195#endif /* DEVELOPMENT || DEBUG */
196
6d2010ae
A
197/*
198 * Conditionally allow dtrace to see these functions for debugging purposes.
199 */
200#ifdef STATIC
201#undef STATIC
202#endif
203#if 0
204#define STATIC
205#else
206#define STATIC static
207#endif
208
209extern boolean_t mach_timer_coalescing_enabled;
210
39236c6e
A
211extern uint64_t timer_deadline_tracking_bin_1, timer_deadline_tracking_bin_2;
212
6d2010ae 213STATIC void
316670eb
A
214fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
215STATIC void
216fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
6d2010ae 217STATIC void
316670eb 218fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
6d2010ae 219STATIC void
316670eb 220fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
6d2010ae 221STATIC void
316670eb 222fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
6d2010ae 223STATIC void
316670eb
A
224fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);
225
91447636
A
226extern int
227kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
91447636
A
228#if NFSCLIENT
229extern int
230netboot_root(void);
231#endif
232int
233pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
2d21ac55 234 proc_t p);
91447636
A
235int
236sysctl_procargs(int *name, u_int namelen, user_addr_t where,
2d21ac55 237 size_t *sizep, proc_t cur_proc);
6d2010ae 238STATIC int
91447636 239sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
2d21ac55 240 proc_t cur_proc, int argc_yes);
91447636
A
241int
242sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
243 size_t newlen, void *sp, int len);
1c79356b 244
6d2010ae
A
245STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
246STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
247STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
248STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
249STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
2d21ac55 250int sysdoproc_callback(proc_t p, void *arg);
1c79356b 251
6d2010ae
A
252
253/* forward declarations for non-static STATIC */
254STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
255STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
6d2010ae
A
256STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
257STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
258STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
6d2010ae 259STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
6d2010ae
A
260#if COUNT_SYSCALLS
261STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
262#endif /* COUNT_SYSCALLS */
5ba3f43e 263#if !CONFIG_EMBEDDED
6d2010ae 264STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
5ba3f43e 265#endif /* !CONFIG_EMBEDDED */
6d2010ae
A
266STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
267STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
6d2010ae
A
268STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
269STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
270STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
271STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
272STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
273STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
274STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
275STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
276STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
277STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
278STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
279STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
280STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
281#if NFSCLIENT
282STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
283#endif
284#ifdef CONFIG_IMGSRC_ACCESS
285STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
286#endif
287STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
288STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
39037602 289#if CONFIG_COREDUMP
6d2010ae
A
290STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
291STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
39037602 292#endif
6d2010ae
A
293STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
294STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
295STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
296STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
297STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
298STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
299STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
6d2010ae
A
300STATIC int fetch_process_cputype( proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
301STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
302STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
303STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
304STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
3e170ce0 305STATIC int sysctl_minimalboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
316670eb 306STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
6d2010ae 307
2d21ac55
A
308
309extern void IORegistrySetOSBuildVersion(char * build_version);
91447636 310
6d2010ae 311STATIC void
b0d623f7 312fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
91447636 313{
b0d623f7
A
314 la64->ldavg[0] = la->ldavg[0];
315 la64->ldavg[1] = la->ldavg[1];
316 la64->ldavg[2] = la->ldavg[2];
317 la64->fscale = (user64_long_t)la->fscale;
318}
319
6d2010ae 320STATIC void
b0d623f7
A
321fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
322{
323 la32->ldavg[0] = la->ldavg[0];
324 la32->ldavg[1] = la->ldavg[1];
325 la32->ldavg[2] = la->ldavg[2];
326 la32->fscale = (user32_long_t)la->fscale;
91447636
A
327}
328
39037602 329#if CONFIG_COREDUMP
1c79356b
A
330/*
331 * Attributes stored in the kernel.
332 */
39236c6e
A
333extern char corefilename[MAXPATHLEN+1];
334extern int do_coredump;
335extern int sugid_coredump;
39037602 336#endif
e5568f75 337
2d21ac55 338#if COUNT_SYSCALLS
39236c6e 339extern int do_count_syscalls;
2d21ac55 340#endif
55e303ae 341
1c79356b
A
342#ifdef INSECURE
343int securelevel = -1;
344#else
345int securelevel;
346#endif
347
6d2010ae 348STATIC int
b0d623f7
A
349sysctl_handle_kern_threadname( __unused struct sysctl_oid *oidp, __unused void *arg1,
350 __unused int arg2, struct sysctl_req *req)
351{
352 int error;
353 struct uthread *ut = get_bsdthread_info(current_thread());
354 user_addr_t oldp=0, newp=0;
355 size_t *oldlenp=NULL;
356 size_t newlen=0;
357
358 oldp = req->oldptr;
359 oldlenp = &(req->oldlen);
360 newp = req->newptr;
361 newlen = req->newlen;
362
363 /* We want the current length, and maybe the string itself */
364 if(oldlenp) {
365 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
366 size_t currlen = MAXTHREADNAMESIZE - 1;
367
368 if(ut->pth_name)
369 /* use length of current thread name */
370 currlen = strlen(ut->pth_name);
371 if(oldp) {
372 if(*oldlenp < currlen)
373 return ENOMEM;
374 /* NOTE - we do not copy the NULL terminator */
375 if(ut->pth_name) {
376 error = copyout(ut->pth_name,oldp,currlen);
377 if(error)
378 return error;
379 }
380 }
381 /* return length of thread name minus NULL terminator (just like strlen) */
382 req->oldidx = currlen;
383 }
384
385 /* We want to set the name to something */
386 if(newp)
387 {
388 if(newlen > (MAXTHREADNAMESIZE - 1))
389 return ENAMETOOLONG;
390 if(!ut->pth_name)
391 {
392 ut->pth_name = (char*)kalloc( MAXTHREADNAMESIZE );
393 if(!ut->pth_name)
394 return ENOMEM;
39037602
A
395 } else {
396 kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, ut->pth_name);
b0d623f7
A
397 }
398 bzero(ut->pth_name, MAXTHREADNAMESIZE);
399 error = copyin(newp, ut->pth_name, newlen);
39037602 400 if (error) {
b0d623f7 401 return error;
39037602
A
402 }
403
404 kernel_debug_string_simple(TRACE_STRING_THREADNAME, ut->pth_name);
b0d623f7
A
405 }
406
407 return 0;
408}
409
6d2010ae 410SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname,"A","");
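/*
 * Hedged usage sketch (illustration, not original source): user space can
 * read or set the calling thread's name via sysctlbyname(3).  Note the
 * handler above does not copy out a NUL terminator, so the caller must
 * terminate the buffer itself, and new names are limited to
 * MAXTHREADNAMESIZE - 1 bytes.
 *
 *	char name[64] = { 0 };
 *	size_t len = sizeof(name) - 1;
 *	sysctlbyname("kern.threadname", name, &len, NULL, 0);        // get
 *	sysctlbyname("kern.threadname", NULL, NULL, "worker", 6);    // set
 */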
b0d623f7 411
6d2010ae
A
412#define BSD_HOST 1
413STATIC int
414sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
415{
416 host_basic_info_data_t hinfo;
417 kern_return_t kret;
418 uint32_t size;
419 int changed;
420 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
421 struct _processor_statistics_np *buf;
422 int error;
55e303ae 423
6d2010ae
A
424 kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
425 if (kret != KERN_SUCCESS) {
426 return EINVAL;
427 }
1c79356b 428
6d2010ae
A
429 size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */
430
431 if (req->oldlen < size) {
432 return EINVAL;
433 }
434
435 MALLOC(buf, struct _processor_statistics_np*, size, M_TEMP, M_ZERO | M_WAITOK);
436
437 kret = get_sched_statistics(buf, &size);
438 if (kret != KERN_SUCCESS) {
439 error = EINVAL;
440 goto out;
441 }
442
443 error = sysctl_io_opaque(req, buf, size, &changed);
444 if (error) {
445 goto out;
446 }
447
448 if (changed) {
449 panic("Sched info changed?!");
450 }
451out:
452 FREE(buf, M_TEMP);
453 return error;
454}
455
456SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");
457
458STATIC int
459sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
460{
461 boolean_t active;
462 int res;
463
464 if (req->newlen != sizeof(active)) {
465 return EINVAL;
466 }
467
468 res = copyin(req->newptr, &active, sizeof(active));
469 if (res != 0) {
470 return res;
471 }
472
473 return set_sched_stats_active(active);
474}
475
476SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
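/*
 * Hedged usage sketch (illustration, not original source): the handler above
 * requires exactly sizeof(boolean_t) bytes of new data, e.g.
 *
 *	boolean_t active = TRUE;
 *	sysctlbyname("kern.sched_stats_enable", NULL, NULL, &active, sizeof(active));
 */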
477
3e170ce0
A
478extern uint32_t sched_debug_flags;
479SYSCTL_INT(_debug, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_debug_flags, 0, "scheduler debug");
480
481#if (DEBUG || DEVELOPMENT)
482extern boolean_t doprnt_hide_pointers;
483SYSCTL_INT(_debug, OID_AUTO, hide_kernel_pointers, CTLFLAG_RW | CTLFLAG_LOCKED, &doprnt_hide_pointers, 0, "hide kernel pointers from log");
484#endif
485
6d2010ae 486extern int get_kernel_symfile(proc_t, char **);
1c79356b 487
2d21ac55 488#if COUNT_SYSCALLS
6d2010ae
A
489#define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
490
39037602 491extern unsigned int nsysent;
6d2010ae
A
492extern int syscalls_log[];
493extern const char *syscallnames[];
494
495STATIC int
496sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
497{
498 __unused int cmd = oidp->oid_arg2; /* subcommand*/
499 __unused int *name = arg1; /* oid element argument vector */
500 __unused int namelen = arg2; /* number of oid element arguments */
501 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
502 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
503 user_addr_t newp = req->newptr; /* user buffer copy in address */
504 size_t newlen = req->newlen; /* user buffer copy in size */
505 int error;
506
507 int tmp;
508
509 /* valid values passed in:
510 * = 0 means don't keep called counts for each bsd syscall
511 * > 0 means keep called counts for each bsd syscall
512 * = 2 means dump current counts to the system log
513 * = 3 means reset all counts
514 * for example, to dump current counts:
515 * sysctl -w kern.count_syscalls=2
516 */
517 error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
518 if ( error != 0 ) {
519 return (error);
520 }
521
522 if ( tmp == 1 ) {
523 do_count_syscalls = 1;
524 }
525 else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
526 int i;
527 for ( i = 0; i < nsysent; i++ ) {
528 if ( syscalls_log[i] != 0 ) {
529 if ( tmp == 2 ) {
530 printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
531 }
532 else {
533 syscalls_log[i] = 0;
2d21ac55 534 }
2d21ac55
A
535 }
536 }
6d2010ae
A
537 if ( tmp != 0 ) {
538 do_count_syscalls = 1;
539 }
1c79356b 540 }
6d2010ae
A
541
542 /* adjust index so we return the right required/consumed amount */
543 if (!error)
544 req->oldidx += req->oldlen;
545
546 return (error);
1c79356b 547}
6d2010ae
A
548SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
549 0, /* Pointer argument (arg1) */
550 0, /* Integer argument (arg2) */
551 sysctl_docountsyscalls, /* Handler function */
552 NULL, /* Data pointer */
553 "");
554#endif /* COUNT_SYSCALLS */
1c79356b 555
2d21ac55
A
556/*
557 * The following sysctl_* functions should not be used
558 * any more, as they can only cope with callers in
559 * user mode; use the new-style
560 * sysctl_io_number()
561 * sysctl_io_string()
562 * sysctl_io_opaque()
563 * instead.
564 */
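/*
 * Illustrative sketch only (not part of the original source): what a
 * new-style handler built on sysctl_io_number() looks like, mirroring the
 * real handlers further down (e.g. sysctl_aiomax).  Names are made up.
 */
#if 0 /* example */
static int example_value;

STATIC int
sysctl_example(__unused struct sysctl_oid *oidp, __unused void *arg1,
	__unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, example_value, sizeof(int),
	    &new_value, &changed);
	if (changed)
		example_value = new_value;
	return (error);
}
#endif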
565
1c79356b
A
566/*
567 * Validate parameters and get old / set new parameters
568 * for an integer-valued sysctl function.
569 */
9bccf70c 570int
91447636
A
571sysctl_int(user_addr_t oldp, size_t *oldlenp,
572 user_addr_t newp, size_t newlen, int *valp)
1c79356b
A
573{
574 int error = 0;
575
91447636
A
576 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
577 return (EFAULT);
1c79356b
A
578 if (oldp && *oldlenp < sizeof(int))
579 return (ENOMEM);
580 if (newp && newlen != sizeof(int))
581 return (EINVAL);
582 *oldlenp = sizeof(int);
583 if (oldp)
584 error = copyout(valp, oldp, sizeof(int));
e5568f75 585 if (error == 0 && newp) {
1c79356b 586 error = copyin(newp, valp, sizeof(int));
b0d623f7 587 AUDIT_ARG(value32, *valp);
e5568f75 588 }
1c79356b
A
589 return (error);
590}
591
9bccf70c
A
592/*
593 * Validate parameters and get old / set new parameters
594 * for a quad (64-bit) valued sysctl function.
595 */
596int
91447636
A
597sysctl_quad(user_addr_t oldp, size_t *oldlenp,
598 user_addr_t newp, size_t newlen, quad_t *valp)
9bccf70c
A
599{
600 int error = 0;
601
91447636
A
602 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
603 return (EFAULT);
9bccf70c
A
604 if (oldp && *oldlenp < sizeof(quad_t))
605 return (ENOMEM);
606 if (newp && newlen != sizeof(quad_t))
607 return (EINVAL);
608 *oldlenp = sizeof(quad_t);
609 if (oldp)
610 error = copyout(valp, oldp, sizeof(quad_t));
611 if (error == 0 && newp)
612 error = copyin(newp, valp, sizeof(quad_t));
613 return (error);
614}
615
6d2010ae 616STATIC int
2d21ac55
A
617sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
618{
b0d623f7 619 if (p->p_pid != (pid_t)*(int*)arg)
2d21ac55
A
620 return(0);
621 else
622 return(1);
623}
624
6d2010ae 625STATIC int
2d21ac55
A
626sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
627{
b0d623f7 628 if (p->p_pgrpid != (pid_t)*(int*)arg)
2d21ac55
A
629 return(0);
630 else
631 return(1);
632}
633
6d2010ae 634STATIC int
2d21ac55
A
635sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
636{
2d21ac55 637 int retval;
b0d623f7 638 struct tty *tp;
2d21ac55 639
2d21ac55
A
640 /* This is very racy but list lock is held.. Hmmm. */
641 if ((p->p_flag & P_CONTROLT) == 0 ||
642 (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
b0d623f7
A
643 (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
644 tp->t_dev != (dev_t)*(int*)arg)
2d21ac55
A
645 retval = 0;
646 else
647 retval = 1;
648
2d21ac55
A
649 return(retval);
650}
651
6d2010ae 652STATIC int
2d21ac55
A
653sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
654{
655 kauth_cred_t my_cred;
656 uid_t uid;
657
658 if (p->p_ucred == NULL)
659 return(0);
660 my_cred = kauth_cred_proc_ref(p);
661 uid = kauth_cred_getuid(my_cred);
662 kauth_cred_unref(&my_cred);
663
b0d623f7 664 if (uid != (uid_t)*(int*)arg)
2d21ac55
A
665 return(0);
666 else
667 return(1);
668}
669
670
6d2010ae 671STATIC int
2d21ac55
A
672sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
673{
674 kauth_cred_t my_cred;
675 uid_t ruid;
676
677 if (p->p_ucred == NULL)
678 return(0);
679 my_cred = kauth_cred_proc_ref(p);
6d2010ae 680 ruid = kauth_cred_getruid(my_cred);
2d21ac55
A
681 kauth_cred_unref(&my_cred);
682
b0d623f7 683 if (ruid != (uid_t)*(int*)arg)
2d21ac55
A
684 return(0);
685 else
686 return(1);
687}
688
1c79356b
A
689/*
690 * try overestimating by 5 procs
691 */
692#define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
2d21ac55
A
693struct sysdoproc_args {
694 int buflen;
316670eb 695 void *kprocp;
2d21ac55
A
696 boolean_t is_64_bit;
697 user_addr_t dp;
698 size_t needed;
699 int sizeof_kproc;
316670eb 700 int *errorp;
2d21ac55
A
701 int uidcheck;
702 int ruidcheck;
703 int ttycheck;
704 int uidval;
705};
706
707int
316670eb 708sysdoproc_callback(proc_t p, void *arg)
2d21ac55 709{
316670eb 710 struct sysdoproc_args *args = arg;
2d21ac55
A
711
712 if (args->buflen >= args->sizeof_kproc) {
316670eb
A
713 if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0))
714 return (PROC_RETURNED);
715 if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0))
716 return (PROC_RETURNED);
717 if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0))
718 return (PROC_RETURNED);
2d21ac55
A
719
720 bzero(args->kprocp, args->sizeof_kproc);
316670eb
A
721 if (args->is_64_bit)
722 fill_user64_proc(p, args->kprocp);
723 else
724 fill_user32_proc(p, args->kprocp);
725 int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
2d21ac55
A
726 if (error) {
727 *args->errorp = error;
316670eb 728 return (PROC_RETURNED_DONE);
2d21ac55
A
729 }
730 args->dp += args->sizeof_kproc;
731 args->buflen -= args->sizeof_kproc;
732 }
733 args->needed += args->sizeof_kproc;
316670eb 734 return (PROC_RETURNED);
2d21ac55 735}
1c79356b 736
6d2010ae
A
737SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
738STATIC int
739sysctl_prochandle SYSCTL_HANDLER_ARGS
1c79356b 740{
6d2010ae
A
741 int cmd = oidp->oid_arg2; /* subcommand for multiple nodes */
742 int *name = arg1; /* oid element argument vector */
743 int namelen = arg2; /* number of oid element arguments */
744 user_addr_t where = req->oldptr;/* user buffer copy out address */
745
91447636
A
746 user_addr_t dp = where;
747 size_t needed = 0;
6d2010ae 748 int buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
1c79356b 749 int error = 0;
316670eb 750 boolean_t is_64_bit = proc_is64bit(current_proc());
b0d623f7
A
751 struct user32_kinfo_proc user32_kproc;
752 struct user64_kinfo_proc user_kproc;
91447636 753 int sizeof_kproc;
316670eb 754 void *kprocp;
2d21ac55
A
755 int (*filterfn)(proc_t, void *) = 0;
756 struct sysdoproc_args args;
757 int uidcheck = 0;
758 int ruidcheck = 0;
759 int ttycheck = 0;
5ba3f43e 760 int success = 0;
1c79356b 761
6d2010ae 762 if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL))
1c79356b 763 return (EINVAL);
6d2010ae 764
91447636
A
765 if (is_64_bit) {
766 sizeof_kproc = sizeof(user_kproc);
316670eb
A
767 kprocp = &user_kproc;
768 } else {
b0d623f7 769 sizeof_kproc = sizeof(user32_kproc);
316670eb 770 kprocp = &user32_kproc;
91447636 771 }
2d21ac55 772
6d2010ae 773 switch (cmd) {
1c79356b
A
774
775 case KERN_PROC_PID:
2d21ac55 776 filterfn = sysdoproc_filt_KERN_PROC_PID;
1c79356b
A
777 break;
778
779 case KERN_PROC_PGRP:
2d21ac55 780 filterfn = sysdoproc_filt_KERN_PROC_PGRP;
1c79356b 781 break;
2d21ac55 782
1c79356b 783 case KERN_PROC_TTY:
2d21ac55 784 ttycheck = 1;
1c79356b
A
785 break;
786
787 case KERN_PROC_UID:
2d21ac55 788 uidcheck = 1;
1c79356b
A
789 break;
790
791 case KERN_PROC_RUID:
2d21ac55 792 ruidcheck = 1;
1c79356b 793 break;
2d21ac55 794
6d2010ae
A
795 case KERN_PROC_ALL:
796 break;
797
798 default:
799 /* must be kern.proc.<unknown> */
800 return (ENOTSUP);
1c79356b 801 }
2d21ac55
A
802
803 error = 0;
804 args.buflen = buflen;
805 args.kprocp = kprocp;
806 args.is_64_bit = is_64_bit;
807 args.dp = dp;
808 args.needed = needed;
809 args.errorp = &error;
810 args.uidcheck = uidcheck;
811 args.ruidcheck = ruidcheck;
812 args.ttycheck = ttycheck;
813 args.sizeof_kproc = sizeof_kproc;
6d2010ae 814 if (namelen)
316670eb 815 args.uidval = name[0];
2d21ac55 816
5ba3f43e
A
817 success = proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
818 sysdoproc_callback, &args, filterfn, name);
819
820 /*
821 * rdar://problem/28433391: if we can't iterate over the processes,
822 * make sure to return an error.
823 */
824
825 if (success != 0)
826 return (ENOMEM);
2d21ac55
A
827
828 if (error)
316670eb 829 return (error);
2d21ac55
A
830
831 dp = args.dp;
832 needed = args.needed;
833
91447636 834 if (where != USER_ADDR_NULL) {
6d2010ae
A
835 req->oldlen = dp - where;
836 if (needed > req->oldlen)
1c79356b
A
837 return (ENOMEM);
838 } else {
839 needed += KERN_PROCSLOP;
6d2010ae 840 req->oldlen = needed;
1c79356b 841 }
6d2010ae
A
842 /* adjust index so we return the right required/consumed amount */
843 req->oldidx += req->oldlen;
1c79356b
A
844 return (0);
845}
316670eb 846
6d2010ae
A
847/*
848 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
849 * in the sysctl declaration itself, which comes into the handler function
850 * as 'oidp->oid_arg2'.
851 *
852 * For these particular sysctls, since they have well known OIDs, we could
853 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
854 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
855 * of a well known value with a common handler function. This is desirable,
856 * because we want well known values to "go away" at some future date.
857 *
858 * It should be noted that the value of '((int *)arg1)[1]' is used as
859 * an integer parameter to the subcommand for many of these sysctls; we'd
860 * rather have used '((int *)arg1)[0]' for that, or even better, an element
861 * in a structure passed in as the 'newp' argument to sysctlbyname(3),
862 * and then use leaf-node permissions enforcement, but that would have
863 * necessitated modifying user space code to correspond to the interface
864 * change, and we are striving for binary backward compatibility here; even
865 * though these are SPI, and not intended for use by user space applications
866 * which are not themselves system tools or libraries, some applications
867 * have erroneously used them.
868 */
869SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
870 0, /* Pointer argument (arg1) */
871 KERN_PROC_ALL, /* Integer argument (arg2) */
872 sysctl_prochandle, /* Handler function */
873 NULL, /* Data is size variant on ILP32/LP64 */
874 "");
875SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
876 0, /* Pointer argument (arg1) */
877 KERN_PROC_PID, /* Integer argument (arg2) */
878 sysctl_prochandle, /* Handler function */
879 NULL, /* Data is size variant on ILP32/LP64 */
880 "");
881SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
882 0, /* Pointer argument (arg1) */
883 KERN_PROC_TTY, /* Integer argument (arg2) */
884 sysctl_prochandle, /* Handler function */
885 NULL, /* Data is size variant on ILP32/LP64 */
886 "");
887SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
888 0, /* Pointer argument (arg1) */
889 KERN_PROC_PGRP, /* Integer argument (arg2) */
890 sysctl_prochandle, /* Handler function */
891 NULL, /* Data is size variant on ILP32/LP64 */
892 "");
893SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
894 0, /* Pointer argument (arg1) */
895 KERN_PROC_UID, /* Integer argument (arg2) */
896 sysctl_prochandle, /* Handler function */
897 NULL, /* Data is size variant on ILP32/LP64 */
898 "");
899SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
900 0, /* Pointer argument (arg1) */
901 KERN_PROC_RUID, /* Integer argument (arg2) */
902 sysctl_prochandle, /* Handler function */
903 NULL, /* Data is size variant on ILP32/LP64 */
904 "");
905SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
906 0, /* Pointer argument (arg1) */
907 KERN_PROC_LCID, /* Integer argument (arg2) */
908 sysctl_prochandle, /* Handler function */
909 NULL, /* Data is size variant on ILP32/LP64 */
910 "");
911
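/*
 * Hedged usage sketch (illustration, not original source): user space
 * normally reaches these nodes through the numeric MIB, e.g. to fetch the
 * kinfo_proc record for a single pid:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
 *	struct kinfo_proc kp;
 *	size_t len = sizeof(kp);
 *	sysctl(mib, 4, &kp, &len, NULL, 0);
 */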
1c79356b 912
1c79356b 913/*
316670eb 914 * Fill in non-zero fields of an eproc structure for the specified process.
1c79356b 915 */
6d2010ae 916STATIC void
316670eb 917fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
1c79356b 918{
2d21ac55 919 struct tty *tp;
316670eb
A
920 struct pgrp *pg;
921 struct session *sessp;
2d21ac55 922 kauth_cred_t my_cred;
2d21ac55
A
923
924 pg = proc_pgrp(p);
925 sessp = proc_session(p);
1c79356b 926
2d21ac55 927 if (pg != PGRP_NULL) {
2d21ac55
A
928 ep->e_pgid = p->p_pgrpid;
929 ep->e_jobc = pg->pg_jobc;
316670eb 930 if (sessp != SESSION_NULL && sessp->s_ttyvp)
55e303ae 931 ep->e_flag = EPROC_CTTY;
55e303ae 932 }
2d21ac55 933 ep->e_ppid = p->p_ppid;
91447636 934 if (p->p_ucred) {
2d21ac55 935 my_cred = kauth_cred_proc_ref(p);
91447636
A
936
937 /* A fake historical pcred */
6d2010ae
A
938 ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
939 ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
940 ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
941 ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);
316670eb 942
91447636 943 /* A fake historical *kauth_cred_t */
2d21ac55
A
944 ep->e_ucred.cr_ref = my_cred->cr_ref;
945 ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
6d2010ae 946 ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
316670eb
A
947 bcopy(posix_cred_get(my_cred)->cr_groups,
948 ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));
91447636 949
2d21ac55 950 kauth_cred_unref(&my_cred);
55e303ae 951 }
55e303ae 952
2d21ac55 953 if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
b0d623f7 954 (tp = SESSION_TP(sessp))) {
1c79356b 955 ep->e_tdev = tp->t_dev;
2d21ac55 956 ep->e_tpgid = sessp->s_ttypgrpid;
1c79356b
A
957 } else
958 ep->e_tdev = NODEV;
55e303ae 959
316670eb
A
960 if (sessp != SESSION_NULL) {
961 if (SESS_LEADER(p, sessp))
962 ep->e_flag |= EPROC_SLEADER;
2d21ac55 963 session_rele(sessp);
316670eb
A
964 }
965 if (pg != PGRP_NULL)
2d21ac55 966 pg_rele(pg);
1c79356b 967}
55e303ae 968
91447636 969/*
316670eb 970 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
91447636 971 */
6d2010ae 972STATIC void
316670eb 973fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
91447636 974{
2d21ac55 975 struct tty *tp;
316670eb
A
976 struct pgrp *pg;
977 struct session *sessp;
2d21ac55
A
978 kauth_cred_t my_cred;
979
980 pg = proc_pgrp(p);
981 sessp = proc_session(p);
91447636 982
2d21ac55 983 if (pg != PGRP_NULL) {
2d21ac55
A
984 ep->e_pgid = p->p_pgrpid;
985 ep->e_jobc = pg->pg_jobc;
316670eb
A
986 if (sessp != SESSION_NULL && sessp->s_ttyvp)
987 ep->e_flag = EPROC_CTTY;
91447636 988 }
2d21ac55 989 ep->e_ppid = p->p_ppid;
91447636 990 if (p->p_ucred) {
2d21ac55 991 my_cred = kauth_cred_proc_ref(p);
91447636
A
992
993 /* A fake historical pcred */
6d2010ae
A
994 ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
995 ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
996 ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
997 ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);
91447636
A
998
999 /* A fake historical *kauth_cred_t */
2d21ac55
A
1000 ep->e_ucred.cr_ref = my_cred->cr_ref;
1001 ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
6d2010ae 1002 ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
316670eb
A
1003 bcopy(posix_cred_get(my_cred)->cr_groups,
1004 ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));
91447636 1005
2d21ac55 1006 kauth_cred_unref(&my_cred);
91447636 1007 }
91447636 1008
2d21ac55 1009 if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
b0d623f7 1010 (tp = SESSION_TP(sessp))) {
91447636 1011 ep->e_tdev = tp->t_dev;
2d21ac55 1012 ep->e_tpgid = sessp->s_ttypgrpid;
91447636
A
1013 } else
1014 ep->e_tdev = NODEV;
1015
316670eb
A
1016 if (sessp != SESSION_NULL) {
1017 if (SESS_LEADER(p, sessp))
1018 ep->e_flag |= EPROC_SLEADER;
2d21ac55 1019 session_rele(sessp);
316670eb 1020 }
2d21ac55
A
1021 if (pg != PGRP_NULL)
1022 pg_rele(pg);
91447636
A
1023}
1024
1c79356b
A
1025/*
1026 * Fill in an eproc structure for the specified process.
316670eb 1027 * bzeroed by our caller, so only set non-zero fields.
1c79356b 1028 */
6d2010ae 1029STATIC void
316670eb 1030fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
1c79356b 1031{
b0d623f7
A
1032 exp->p_starttime.tv_sec = p->p_start.tv_sec;
1033 exp->p_starttime.tv_usec = p->p_start.tv_usec;
316670eb 1034 exp->p_flag = p->p_flag;
2d21ac55
A
1035 if (p->p_lflag & P_LTRACED)
1036 exp->p_flag |= P_TRACED;
1037 if (p->p_lflag & P_LPPWAIT)
1038 exp->p_flag |= P_PPWAIT;
1039 if (p->p_lflag & P_LEXIT)
1040 exp->p_flag |= P_WEXIT;
316670eb
A
1041 exp->p_stat = p->p_stat;
1042 exp->p_pid = p->p_pid;
1043 exp->p_oppid = p->p_oppid;
1c79356b 1044 /* Mach related */
316670eb
A
1045 exp->user_stack = p->user_stack;
1046 exp->p_debugger = p->p_debugger;
1047 exp->sigwait = p->sigwait;
1c79356b 1048 /* scheduling */
2d21ac55 1049#ifdef _PROC_HAS_SCHEDINFO_
316670eb
A
1050 exp->p_estcpu = p->p_estcpu;
1051 exp->p_pctcpu = p->p_pctcpu;
1052 exp->p_slptime = p->p_slptime;
2d21ac55 1053#endif
316670eb
A
1054 exp->p_realtimer.it_interval.tv_sec =
1055 (user32_time_t)p->p_realtimer.it_interval.tv_sec;
1056 exp->p_realtimer.it_interval.tv_usec =
1057 (__int32_t)p->p_realtimer.it_interval.tv_usec;
1058
1059 exp->p_realtimer.it_value.tv_sec =
1060 (user32_time_t)p->p_realtimer.it_value.tv_sec;
1061 exp->p_realtimer.it_value.tv_usec =
1062 (__int32_t)p->p_realtimer.it_value.tv_usec;
1063
1064 exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
1065 exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;
1066
1067 exp->p_sigignore = p->p_sigignore;
1068 exp->p_sigcatch = p->p_sigcatch;
1069 exp->p_priority = p->p_priority;
1070 exp->p_nice = p->p_nice;
1071 bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
1072 exp->p_xstat = p->p_xstat;
1073 exp->p_acflag = p->p_acflag;
91447636
A
1074}
1075
1076/*
1077 * Fill in an LP64 version of extern_proc structure for the specified process.
1078 */
6d2010ae 1079STATIC void
316670eb 1080fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
91447636 1081{
2d21ac55
A
1082 exp->p_starttime.tv_sec = p->p_start.tv_sec;
1083 exp->p_starttime.tv_usec = p->p_start.tv_usec;
316670eb 1084 exp->p_flag = p->p_flag;
2d21ac55
A
1085 if (p->p_lflag & P_LTRACED)
1086 exp->p_flag |= P_TRACED;
1087 if (p->p_lflag & P_LPPWAIT)
1088 exp->p_flag |= P_PPWAIT;
1089 if (p->p_lflag & P_LEXIT)
1090 exp->p_flag |= P_WEXIT;
316670eb
A
1091 exp->p_stat = p->p_stat;
1092 exp->p_pid = p->p_pid;
1093 exp->p_oppid = p->p_oppid;
91447636 1094 /* Mach related */
316670eb
A
1095 exp->user_stack = p->user_stack;
1096 exp->p_debugger = p->p_debugger;
1097 exp->sigwait = p->sigwait;
91447636 1098 /* scheduling */
2d21ac55 1099#ifdef _PROC_HAS_SCHEDINFO_
316670eb
A
1100 exp->p_estcpu = p->p_estcpu;
1101 exp->p_pctcpu = p->p_pctcpu;
1102 exp->p_slptime = p->p_slptime;
2d21ac55 1103#endif
91447636
A
1104 exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
1105 exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
316670eb 1106
91447636
A
1107 exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
1108 exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
316670eb 1109
91447636
A
1110 exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
1111 exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
316670eb
A
1112
1113 exp->p_sigignore = p->p_sigignore;
1114 exp->p_sigcatch = p->p_sigcatch;
1115 exp->p_priority = p->p_priority;
1116 exp->p_nice = p->p_nice;
1117 bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
1118 exp->p_xstat = p->p_xstat;
1119 exp->p_acflag = p->p_acflag;
1c79356b
A
1120}
1121
6d2010ae 1122STATIC void
316670eb 1123fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
55e303ae 1124{
316670eb 1125 /* on a 64 bit kernel, 32 bit users get some truncated information */
b0d623f7
A
1126 fill_user32_externproc(p, &kp->kp_proc);
1127 fill_user32_eproc(p, &kp->kp_eproc);
55e303ae
A
1128}
1129
6d2010ae 1130STATIC void
316670eb 1131fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
91447636 1132{
b0d623f7
A
1133 fill_user64_externproc(p, &kp->kp_proc);
1134 fill_user64_eproc(p, &kp->kp_eproc);
91447636
A
1135}
1136
6d2010ae
A
1137STATIC int
1138sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
1c79356b 1139{
6d2010ae
A
1140 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1141 int *name = arg1; /* oid element argument vector */
1142 int namelen = arg2; /* number of oid element arguments */
1143 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1144 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1145// user_addr_t newp = req->newptr; /* user buffer copy in address */
1146// size_t newlen = req->newlen; /* user buffer copy in size */
1147
9bccf70c 1148 int ret=0;
1c79356b 1149
c910b4d9
A
1150 if (namelen == 0)
1151 return(ENOTSUP);
39037602 1152
1c79356b
A
1153 switch(name[0]) {
1154 case KERN_KDEFLAGS:
1155 case KERN_KDDFLAGS:
1156 case KERN_KDENABLE:
1157 case KERN_KDGETBUF:
1158 case KERN_KDSETUP:
1159 case KERN_KDREMOVE:
1160 case KERN_KDSETREG:
1161 case KERN_KDGETREG:
1162 case KERN_KDREADTR:
3e170ce0
A
1163 case KERN_KDWRITETR:
1164 case KERN_KDWRITEMAP:
39037602 1165 case KERN_KDTEST:
1c79356b
A
1166 case KERN_KDPIDTR:
1167 case KERN_KDTHRMAP:
1168 case KERN_KDPIDEX:
1c79356b 1169 case KERN_KDSETBUF:
39236c6e 1170 case KERN_KDREADCURTHRMAP:
316670eb 1171 case KERN_KDSET_TYPEFILTER:
3e170ce0 1172 case KERN_KDBUFWAIT:
39236c6e 1173 case KERN_KDCPUMAP:
3e170ce0
A
1174 case KERN_KDWRITEMAP_V3:
1175 case KERN_KDWRITETR_V3:
39037602
A
1176 ret = kdbg_control(name, namelen, oldp, oldlenp);
1177 break;
1c79356b 1178 default:
91447636 1179 ret= ENOTSUP;
1c79356b
A
1180 break;
1181 }
6d2010ae
A
1182
1183 /* adjust index so we return the right required/consumed amount */
1184 if (!ret)
1185 req->oldidx += req->oldlen;
1186
1187 return (ret);
1c79356b 1188}
6d2010ae
A
1189SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
1190 0, /* Pointer argument (arg1) */
1191 0, /* Integer argument (arg2) */
1192 sysctl_kdebug_ops, /* Handler function */
1193 NULL, /* Data pointer */
1194 "");
1c79356b 1195
1c79356b 1196
5ba3f43e 1197#if !CONFIG_EMBEDDED
1c79356b 1198/*
55e303ae
A
1199 * Return the top *sizep bytes of the user stack, or the entire area of the
1200 * user stack down through the saved exec_path, whichever is smaller.
1c79356b 1201 */
6d2010ae
A
1202STATIC int
1203sysctl_doprocargs SYSCTL_HANDLER_ARGS
1204{
1205 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1206 int *name = arg1; /* oid element argument vector */
1207 int namelen = arg2; /* number of oid element arguments */
1208 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1209 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1210// user_addr_t newp = req->newptr; /* user buffer copy in address */
1211// size_t newlen = req->newlen; /* user buffer copy in size */
1212 int error;
1213
1214 error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 0);
1215
1216 /* adjust index so we return the right required/consumed amount */
1217 if (!error)
1218 req->oldidx += req->oldlen;
1219
1220 return (error);
55e303ae 1221}
6d2010ae
A
1222SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
1223 0, /* Pointer argument (arg1) */
1224 0, /* Integer argument (arg2) */
1225 sysctl_doprocargs, /* Handler function */
1226 NULL, /* Data pointer */
1227 "");
5ba3f43e 1228#endif /* !CONFIG_EMBEDDED */
6d2010ae
A
1229
1230STATIC int
1231sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1232{
1233 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1234 int *name = arg1; /* oid element argument vector */
1235 int namelen = arg2; /* number of oid element arguments */
1236 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1237 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1238// user_addr_t newp = req->newptr; /* user buffer copy in address */
1239// size_t newlen = req->newlen; /* user buffer copy in size */
1240 int error;
55e303ae 1241
6d2010ae
A
1242 error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 1);
1243
1244 /* adjust index so we return the right required/consumed amount */
1245 if (!error)
1246 req->oldidx += req->oldlen;
1247
1248 return (error);
55e303ae 1249}
6d2010ae
A
1250SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
1251 0, /* Pointer argument (arg1) */
1252 0, /* Integer argument (arg2) */
1253 sysctl_doprocargs2, /* Handler function */
1254 NULL, /* Data pointer */
1255 "");
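/*
 * Hedged usage sketch (illustration, not original source): user space reads
 * the argument area with the raw MIB; for KERN_PROCARGS2 the first word of
 * the returned buffer is argc, followed by the packed strings.
 *
 *	int mib[3] = { CTL_KERN, KERN_PROCARGS2, pid };
 *	size_t len = ARG_MAX;
 *	char *buf = malloc(len);
 *	if (buf != NULL && sysctl(mib, 3, buf, &len, NULL, 0) == 0) {
 *		int argc;
 *		memcpy(&argc, buf, sizeof(argc));
 *		...
 *	}
 */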
55e303ae 1256
6d2010ae 1257STATIC int
c910b4d9 1258sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
2d21ac55 1259 size_t *sizep, proc_t cur_proc, int argc_yes)
1c79356b 1260{
2d21ac55 1261 proc_t p;
91447636 1262 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1c79356b 1263 int error = 0;
2d21ac55 1264 struct _vm_map *proc_map;
1c79356b
A
1265 struct task * task;
1266 vm_map_copy_t tmp;
91447636
A
1267 user_addr_t arg_addr;
1268 size_t arg_size;
1c79356b 1269 caddr_t data;
2d21ac55 1270 size_t argslen=0;
91447636 1271 int size;
1c79356b 1272 vm_offset_t copy_start, copy_end;
1c79356b
A
1273 kern_return_t ret;
1274 int pid;
2d21ac55
A
1275 kauth_cred_t my_cred;
1276 uid_t uid;
5ba3f43e 1277 int argc = -1;
1c79356b 1278
c910b4d9
A
1279 if ( namelen < 1 )
1280 return(EINVAL);
b0d623f7 1281
55e303ae 1282 if (argc_yes)
91447636 1283 buflen -= sizeof(int); /* reserve first word to return argc */
1c79356b 1284
91447636
A
1285 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1286 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl) */
1287 /* is not NULL, then the caller wants us to return the length needed to */
1288 /* hold the data we would return */
1289 if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1c79356b
A
1290 return(EINVAL);
1291 }
1292 arg_size = buflen;
1293
1294 /*
1295 * Lookup process by pid
1296 */
1297 pid = name[0];
2d21ac55 1298 p = proc_find(pid);
1c79356b
A
1299 if (p == NULL) {
1300 return(EINVAL);
1301 }
1302
1303 /*
1304 * Copy the top N bytes of the stack.
1305 * On all machines we have so far, the stack grows
1306 * downwards.
1307 *
1308 * If the user expects no more than N bytes of
1309 * argument list, use that as a guess for the
1310 * size.
1311 */
1312
2d21ac55
A
1313 if (!p->user_stack) {
1314 proc_rele(p);
1c79356b 1315 return(EINVAL);
2d21ac55 1316 }
1c79356b 1317
91447636
A
1318 if (where == USER_ADDR_NULL) {
1319 /* caller only wants to know length of proc args data */
2d21ac55
A
1320 if (sizep == NULL) {
1321 proc_rele(p);
91447636 1322 return(EFAULT);
2d21ac55 1323 }
5ba3f43e
A
1324
1325 size = p->p_argslen;
2d21ac55 1326 proc_rele(p);
5ba3f43e
A
1327 if (argc_yes) {
1328 size += sizeof(int);
1329 } else {
91447636
A
1330 /*
1331 * old PROCARGS will return the executable's path plus some
1332 * extra space for word alignment and data tags
1333 */
5ba3f43e
A
1334 size += PATH_MAX + (6 * sizeof(int));
1335 }
91447636
A
1336 size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1337 *sizep = size;
1338 return (0);
1339 }
5ba3f43e 1340
2d21ac55
A
1341 my_cred = kauth_cred_proc_ref(p);
1342 uid = kauth_cred_getuid(my_cred);
1343 kauth_cred_unref(&my_cred);
1344
1345 if ((uid != kauth_cred_getuid(kauth_cred_get()))
1346 && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
1347 proc_rele(p);
9bccf70c 1348 return (EINVAL);
2d21ac55 1349 }
91447636
A
1350
1351 if ((u_int)arg_size > p->p_argslen)
1352 arg_size = round_page(p->p_argslen);
1353
1354 arg_addr = p->user_stack - arg_size;
1c79356b 1355
1c79356b
A
1356 /*
1357 * Before we can block (any VM code), make another
1358 * reference to the map to keep it alive. We do
1359 * that by getting a reference on the task itself.
1360 */
1361 task = p->task;
2d21ac55
A
1362 if (task == NULL) {
1363 proc_rele(p);
1c79356b 1364 return(EINVAL);
2d21ac55 1365 }
5ba3f43e
A
1366
1367 /* save off argc before releasing the proc */
1368 argc = p->p_argc;
1369
2d21ac55 1370 argslen = p->p_argslen;
0b4e3aa0 1371 /*
91447636
A
1372 * Once we have a task reference we can convert that into a
1373 * map reference, which we will use in the calls below. The
1374 * task/process may change its map after we take this reference
1375 * (see execve), but the worst that will happen then is a return
1376 * of stale info (which is always a possibility).
0b4e3aa0 1377 */
91447636 1378 task_reference(task);
2d21ac55 1379 proc_rele(p);
91447636
A
1380 proc_map = get_task_map_reference(task);
1381 task_deallocate(task);
2d21ac55 1382
91447636
A
1383 if (proc_map == NULL)
1384 return(EINVAL);
1c79356b 1385
91447636 1386
3e170ce0 1387 ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size), VM_KERN_MEMORY_BSD);
1c79356b 1388 if (ret != KERN_SUCCESS) {
91447636 1389 vm_map_deallocate(proc_map);
1c79356b
A
1390 return(ENOMEM);
1391 }
1392
91447636 1393 copy_end = round_page(copy_start + arg_size);
1c79356b 1394
91447636
A
1395 if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
1396 (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
1397 vm_map_deallocate(proc_map);
1c79356b 1398 kmem_free(kernel_map, copy_start,
91447636 1399 round_page(arg_size));
1c79356b
A
1400 return (EIO);
1401 }
1402
1403 /*
1404 * Now that we've done the copyin from the process'
1405 * map, we can release the reference to it.
1406 */
91447636 1407 vm_map_deallocate(proc_map);
1c79356b 1408
91447636
A
1409 if( vm_map_copy_overwrite(kernel_map,
1410 (vm_map_address_t)copy_start,
1411 tmp, FALSE) != KERN_SUCCESS) {
1c79356b 1412 kmem_free(kernel_map, copy_start,
91447636 1413 round_page(arg_size));
2dced7af 1414 vm_map_copy_discard(tmp);
1c79356b
A
1415 return (EIO);
1416 }
1417
2d21ac55
A
1418 if (arg_size > argslen) {
1419 data = (caddr_t) (copy_end - argslen);
1420 size = argslen;
55e303ae 1421 } else {
91447636
A
1422 data = (caddr_t) (copy_end - arg_size);
1423 size = arg_size;
55e303ae 1424 }
1c79356b 1425
3e170ce0
A
1426 /*
1427 * When these sysctls were introduced, the first string in the strings
1428 * section was just the bare path of the executable. However, for security
1429 * reasons we now prefix this string with executable_path= so it can be
1430 * parsed getenv style. To avoid binary compatibility issues with existing
1431 * callers of this sysctl, we strip it off here if present.
1432 * (rdar://problem/13746466)
1433 */
1434#define EXECUTABLE_KEY "executable_path="
1435 if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0){
1436 data += strlen(EXECUTABLE_KEY);
1437 size -= strlen(EXECUTABLE_KEY);
1438 }
1439
55e303ae
A
1440 if (argc_yes) {
1441 /* Put the process's argc as the first word in the copyout buffer */
5ba3f43e 1442 suword(where, argc);
91447636
A
1443 error = copyout(data, (where + sizeof(int)), size);
1444 size += sizeof(int);
55e303ae
A
1445 } else {
1446 error = copyout(data, where, size);
1447
1448 /*
1449 * Make the old PROCARGS work to return the executable's path
1450 * But, only if there is enough space in the provided buffer
1451 *
1452 * on entry: data [possibly] points to the beginning of the path
1453 *
1454 * Note: we keep all pointers & sizes aligned to word boundaries
1455 */
2d21ac55 1456 if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
55e303ae 1457 {
91447636 1458 int binPath_sz, alignedBinPath_sz = 0;
55e303ae 1459 int extraSpaceNeeded, addThis;
91447636 1460 user_addr_t placeHere;
55e303ae 1461 char * str = (char *) data;
91447636 1462 int max_len = size;
55e303ae
A
1463
1464 /* Some apps are really bad about messing up their stacks.
1465 So, we have to be extra careful about getting the length
1466 of the executing binary. If we encounter an error, we bail.
1467 */
1468
1469 /* Limit ourselves to PATH_MAX paths */
1470 if ( max_len > PATH_MAX ) max_len = PATH_MAX;
1471
1472 binPath_sz = 0;
1473
1474 while ( (binPath_sz < max_len-1) && (*str++ != 0) )
1475 binPath_sz++;
1476
91447636 1477 /* If we have a NUL terminator, copy it, too */
55e303ae
A
1478 if (binPath_sz < max_len-1) binPath_sz += 1;
1479
1480 /* Pre-flight the space requirements */
1481
1482 /* Account for the padding that fills out binPath to the next word */
91447636 1483 alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;
55e303ae
A
1484
1485 placeHere = where + size;
1486
1487 /* Account for the bytes needed to keep placeHere word aligned */
91447636 1488 addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;
55e303ae
A
1489
1490 /* Add up all the space that is needed */
91447636 1491 extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
55e303ae
A
1492
1493 /* is there room to tack on argv[0]? */
2d21ac55 1494 if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
55e303ae
A
1495 {
1496 placeHere += addThis;
1497 suword(placeHere, 0);
91447636 1498 placeHere += sizeof(int);
55e303ae 1499 suword(placeHere, 0xBFFF0000);
91447636 1500 placeHere += sizeof(int);
55e303ae 1501 suword(placeHere, 0);
91447636 1502 placeHere += sizeof(int);
55e303ae
A
1503 error = copyout(data, placeHere, binPath_sz);
1504 if ( ! error )
1505 {
1506 placeHere += binPath_sz;
1507 suword(placeHere, 0);
1508 size += extraSpaceNeeded;
1509 }
1510 }
1511 }
1512 }
1513
1514 if (copy_start != (vm_offset_t) 0) {
1515 kmem_free(kernel_map, copy_start, copy_end - copy_start);
1c79356b
A
1516 }
1517 if (error) {
1518 return(error);
1519 }
1520
91447636 1521 if (where != USER_ADDR_NULL)
1c79356b
A
1522 *sizep = size;
1523 return (0);
1524}
55e303ae
A
1525
1526
1527/*
2d21ac55 1528 * Max number of concurrent aio requests
55e303ae 1529 */
6d2010ae 1530STATIC int
2d21ac55
A
1531sysctl_aiomax
1532(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
55e303ae 1533{
2d21ac55
A
1534 int new_value, changed;
1535 int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
1536 if (changed) {
1537 /* make sure the system-wide limit is greater than the per process limit */
316670eb 1538 if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS)
55e303ae
A
1539 aio_max_requests = new_value;
1540 else
1541 error = EINVAL;
1542 }
2d21ac55
A
1543 return(error);
1544}
55e303ae
A
1545
1546
1547/*
2d21ac55 1548 * Max number of concurrent aio requests per process
55e303ae 1549 */
6d2010ae 1550STATIC int
2d21ac55
A
1551sysctl_aioprocmax
1552(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
55e303ae 1553{
2d21ac55
A
1554 int new_value, changed;
1555 int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
1556 if (changed) {
1557 /* make sure per process limit is less than the system-wide limit */
1558 if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
55e303ae
A
1559 aio_max_requests_per_process = new_value;
1560 else
1561 error = EINVAL;
1562 }
2d21ac55
A
1563 return(error);
1564}
55e303ae
A
1565
1566
1567/*
2d21ac55 1568 * Max number of async IO worker threads
55e303ae 1569 */
6d2010ae 1570STATIC int
2d21ac55
A
1571sysctl_aiothreads
1572(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
55e303ae 1573{
2d21ac55
A
1574 int new_value, changed;
1575 int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
1576 if (changed) {
1577 /* we only allow an increase in the number of worker threads */
55e303ae 1578 if (new_value > aio_worker_threads ) {
2d21ac55 1579 _aio_create_worker_threads((new_value - aio_worker_threads));
55e303ae
A
1580 aio_worker_threads = new_value;
1581 }
1582 else
1583 error = EINVAL;
1584 }
2d21ac55
A
1585 return(error);
1586}
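/*
 * Illustrative userland sketch (not part of this file): raising the AIO limits
 * that the three handlers above cross-check. Because kern.aioprocmax may not
 * exceed kern.aiomax, the system-wide value has to be raised first. The numeric
 * values and the helper name are arbitrary for the example, and the writes
 * normally require root privileges.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	int
 *	raise_aio_limits(void)
 *	{
 *		int sys_max = 512;   // example value; must stay within AIO_MAX_REQUESTS
 *		int proc_max = 256;  // example value; must not exceed kern.aiomax
 *
 *		if (sysctlbyname("kern.aiomax", NULL, NULL, &sys_max, sizeof(sys_max)) == -1)
 *			return -1;
 *		if (sysctlbyname("kern.aioprocmax", NULL, NULL, &proc_max, sizeof(proc_max)) == -1)
 *			return -1;
 *		return 0;
 *	}
 */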
55e303ae
A
1587
1588
1589/*
2d21ac55 1590 * System-wide limit on the max number of processes
55e303ae 1591 */
6d2010ae 1592STATIC int
2d21ac55
A
1593sysctl_maxproc
1594(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
55e303ae 1595{
2d21ac55
A
1596 int new_value, changed;
1597 int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
1598 if (changed) {
b0d623f7 1599 AUDIT_ARG(value32, new_value);
2d21ac55
A
 1600 /* make sure the system-wide limit does not exceed the hard
 1601 limit configured at kernel compile time */
1602 if (new_value <= hard_maxproc && new_value > 0)
1603 maxproc = new_value;
1604 else
55e303ae
A
1605 error = EINVAL;
1606 }
2d21ac55
A
1607 return(error);
1608}
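/*
 * Illustrative userland sketch (not part of this file): reading kern.maxproc
 * through the numeric MIB interface of sysctl(3); the helper name is made up
 * for the example.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	print_maxproc(void)
 *	{
 *		int mib[2] = { CTL_KERN, KERN_MAXPROC };  // numeric form of "kern.maxproc"
 *		int maxproc;
 *		size_t len = sizeof(maxproc);
 *
 *		if (sysctl(mib, 2, &maxproc, &len, NULL, 0) == -1)
 *			return -1;
 *		printf("kern.maxproc = %d\n", maxproc);
 *		return 0;
 *	}
 */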
55e303ae 1609
2d21ac55 1610SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
6d2010ae 1611 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2d21ac55
A
1612 ostype, 0, "");
1613SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
6d2010ae 1614 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2d21ac55
A
1615 osrelease, 0, "");
1616SYSCTL_INT(_kern, KERN_OSREV, osrevision,
6d2010ae 1617 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
b0d623f7 1618 (int *)NULL, BSD, "");
2d21ac55 1619SYSCTL_STRING(_kern, KERN_VERSION, version,
6d2010ae 1620 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2d21ac55 1621 version, 0, "");
6d2010ae
A
1622SYSCTL_STRING(_kern, OID_AUTO, uuid,
1623 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
39236c6e 1624 &kernel_uuid_string[0], 0, "");
2d21ac55 1625
b0d623f7
A
1626#if DEBUG
1627int debug_kprint_syscall = 0;
1628char debug_kprint_syscall_process[MAXCOMLEN+1];
1629
6d2010ae 1630/* Thread safe: bits and string value are not used to reclaim state */
b0d623f7 1631SYSCTL_INT (_debug, OID_AUTO, kprint_syscall,
6d2010ae 1632 CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
b0d623f7 1633SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
6d2010ae 1634 CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
b0d623f7
A
1635 "name of process for kprintf syscall tracing");
1636
1637int debug_kprint_current_process(const char **namep)
1638{
1639 struct proc *p = current_proc();
1640
1641 if (p == NULL) {
1642 return 0;
1643 }
1644
1645 if (debug_kprint_syscall_process[0]) {
1646 /* user asked to scope tracing to a particular process name */
1647 if(0 == strncmp(debug_kprint_syscall_process,
1648 p->p_comm, sizeof(debug_kprint_syscall_process))) {
1649 /* no value in telling the user that we traced what they asked */
1650 if(namep) *namep = NULL;
1651
1652 return 1;
1653 } else {
1654 return 0;
1655 }
1656 }
1657
1658 /* trace all processes. Tell user what we traced */
1659 if (namep) {
1660 *namep = p->p_comm;
1661 }
1662
1663 return 1;
1664}
1665#endif
1666
2d21ac55
A
1667/* PR-5293665: need to use a callback function for kern.osversion to set
1668 * osversion in IORegistry */
55e303ae 1669
6d2010ae 1670STATIC int
2d21ac55 1671sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
55e303ae 1672{
2d21ac55 1673 int rval = 0;
55e303ae 1674
2d21ac55
A
1675 rval = sysctl_handle_string(oidp, arg1, arg2, req);
1676
1677 if (req->newptr) {
1678 IORegistrySetOSBuildVersion((char *)arg1);
1679 }
1680
1681 return rval;
1682}
1683
1684SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
6d2010ae 1685 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
2d21ac55
A
 1686 osversion, 256 /* OSVERSIZE */,
1687 sysctl_osversion, "A", "");
1688
5ba3f43e
A
1689static uint64_t osvariant_status = 0;
1690
1691STATIC int
1692sysctl_osvariant_status(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1693{
1694 if (req->newptr != 0) {
1695 /*
1696 * Can only ever be set by launchd, and only once at boot.
1697 */
1698 if (req->p->p_pid != 1 || osvariant_status != 0) {
1699 return EPERM;
1700 }
1701 }
1702
1703 return sysctl_handle_quad(oidp, arg1, arg2, req);
1704}
1705
1706SYSCTL_PROC(_kern, OID_AUTO, osvariant_status,
1707 CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
1708 &osvariant_status, sizeof(osvariant_status),
1709 sysctl_osvariant_status, "Q", "Opaque flags used to cache OS variant information");
1710
6d2010ae 1711STATIC int
2d21ac55
A
1712sysctl_sysctl_bootargs
1713(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1714{
1715 int error;
5ba3f43e
A
1716 /* BOOT_LINE_LENGTH */
1717#if CONFIG_EMBEDDED
1718 size_t boot_args_len = 256;
1719#else
1720 size_t boot_args_len = 1024;
1721#endif
1722 char buf[boot_args_len];
2d21ac55 1723
5ba3f43e
A
1724 strlcpy(buf, PE_boot_args(), boot_args_len);
1725 error = sysctl_io_string(req, buf, boot_args_len, 0, NULL);
2d21ac55
A
1726 return(error);
1727}
1728
1729SYSCTL_PROC(_kern, OID_AUTO, bootargs,
1730 CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
1731 NULL, 0,
1732 sysctl_sysctl_bootargs, "A", "bootargs");
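/*
 * Illustrative userland sketch (not part of this file): the usual two-call
 * pattern for string sysctls such as kern.bootargs -- probe for the length,
 * allocate, then fetch. The helper name is made up for the example.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int
 *	print_bootargs(void)
 *	{
 *		size_t len = 0;
 *		char *buf;
 *
 *		if (sysctlbyname("kern.bootargs", NULL, &len, NULL, 0) == -1)
 *			return -1;
 *		if ((buf = malloc(len)) == NULL)
 *			return -1;
 *		if (sysctlbyname("kern.bootargs", buf, &len, NULL, 0) == -1) {
 *			free(buf);
 *			return -1;
 *		}
 *		printf("boot-args: %s\n", buf);
 *		free(buf);
 *		return 0;
 *	}
 */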
1733
1734SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
6d2010ae 1735 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2d21ac55
A
1736 &maxfiles, 0, "");
1737SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
6d2010ae 1738 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
b0d623f7 1739 (int *)NULL, ARG_MAX, "");
2d21ac55 1740SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
6d2010ae 1741 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
b0d623f7 1742 (int *)NULL, _POSIX_VERSION, "");
2d21ac55 1743SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
6d2010ae 1744 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
b0d623f7 1745 (int *)NULL, NGROUPS_MAX, "");
2d21ac55 1746SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
6d2010ae 1747 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
b0d623f7 1748 (int *)NULL, 1, "");
2d21ac55
A
1749#if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
1750SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
6d2010ae 1751 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
b0d623f7 1752 (int *)NULL, 1, "");
2d21ac55
A
1753#else
1754SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
6d2010ae 1755 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2d21ac55
A
1756 NULL, 0, "");
1757#endif
b0d623f7 1758SYSCTL_INT(_kern, OID_AUTO, num_files,
6d2010ae 1759 CTLFLAG_RD | CTLFLAG_LOCKED,
b0d623f7
A
1760 &nfiles, 0, "");
1761SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
6d2010ae 1762 CTLFLAG_RD | CTLFLAG_LOCKED,
b0d623f7
A
1763 &numvnodes, 0, "");
1764SYSCTL_INT(_kern, OID_AUTO, num_tasks,
6d2010ae 1765 CTLFLAG_RD | CTLFLAG_LOCKED,
b0d623f7
A
1766 &task_max, 0, "");
1767SYSCTL_INT(_kern, OID_AUTO, num_threads,
6d2010ae 1768 CTLFLAG_RD | CTLFLAG_LOCKED,
b0d623f7
A
1769 &thread_max, 0, "");
1770SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
6d2010ae 1771 CTLFLAG_RD | CTLFLAG_LOCKED,
b0d623f7 1772 &task_threadmax, 0, "");
2d21ac55 1773
6d2010ae 1774STATIC int
2d21ac55
A
1775sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1776{
b0d623f7 1777 int oldval = desiredvnodes;
2d21ac55 1778 int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
b0d623f7
A
1779
1780 if (oldval != desiredvnodes) {
b0d623f7
A
1781 resize_namecache(desiredvnodes);
1782 }
1783
2d21ac55
A
1784 return(error);
1785}
1786
6d2010ae
A
1787SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
1788 CTLFLAG_RW | CTLFLAG_LOCKED,
1789 &nc_disabled, 0, "");
1790
2d21ac55 1791SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
6d2010ae 1792 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55
A
1793 0, 0, sysctl_maxvnodes, "I", "");
1794
1795SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
6d2010ae 1796 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55
A
1797 0, 0, sysctl_maxproc, "I", "");
1798
1799SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
6d2010ae 1800 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55
A
1801 0, 0, sysctl_aiomax, "I", "");
1802
1803SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
6d2010ae 1804 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55
A
1805 0, 0, sysctl_aioprocmax, "I", "");
1806
1807SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
6d2010ae 1808 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55
A
1809 0, 0, sysctl_aiothreads, "I", "");
1810
fe8ab488
A
1811#if (DEVELOPMENT || DEBUG)
1812extern int sched_smt_balance;
1813SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance,
1814 CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
1815 &sched_smt_balance, 0, "");
5ba3f43e
A
1816#if __arm__ || __arm64__
1817extern uint32_t perfcontrol_requested_recommended_cores;
1818SYSCTL_UINT(_kern, OID_AUTO, sched_recommended_cores,
1819 CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
1820 &perfcontrol_requested_recommended_cores, 0, "");
1821
1822/* Scheduler perfcontrol callouts sysctls */
1823SYSCTL_DECL(_kern_perfcontrol_callout);
1824SYSCTL_NODE(_kern, OID_AUTO, perfcontrol_callout, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
1825 "scheduler perfcontrol callouts");
1826
1827extern int perfcontrol_callout_stats_enabled;
1828SYSCTL_INT(_kern_perfcontrol_callout, OID_AUTO, stats_enabled,
1829 CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
1830 &perfcontrol_callout_stats_enabled, 0, "");
1831
1832extern uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
1833 perfcontrol_callout_stat_t stat);
1834
1835/* On-Core Callout */
1836STATIC int
1837sysctl_perfcontrol_callout_stat
1838(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1839{
1840 perfcontrol_callout_stat_t stat = (perfcontrol_callout_stat_t)arg1;
1841 perfcontrol_callout_type_t type = (perfcontrol_callout_type_t)arg2;
1842 return sysctl_io_number(req, (int)perfcontrol_callout_stat_avg(type, stat),
1843 sizeof(int), NULL, NULL);
1844}
1845
1846SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_instr,
1847 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
1848 (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_ON_CORE,
1849 sysctl_perfcontrol_callout_stat, "I", "");
1850SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_cycles,
1851 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
1852 (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_ON_CORE,
1853 sysctl_perfcontrol_callout_stat, "I", "");
1854SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_instr,
1855 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
1856 (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_OFF_CORE,
1857 sysctl_perfcontrol_callout_stat, "I", "");
1858SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_cycles,
1859 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
1860 (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_OFF_CORE,
1861 sysctl_perfcontrol_callout_stat, "I", "");
1862SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_instr,
1863 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
1864 (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_CONTEXT,
1865 sysctl_perfcontrol_callout_stat, "I", "");
1866SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_cycles,
1867 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
1868 (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_CONTEXT,
1869 sysctl_perfcontrol_callout_stat, "I", "");
1870SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_instr,
1871 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
1872 (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_STATE_UPDATE,
1873 sysctl_perfcontrol_callout_stat, "I", "");
1874SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_cycles,
1875 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
1876 (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_STATE_UPDATE,
1877 sysctl_perfcontrol_callout_stat, "I", "");
1878
1879#endif /* __arm__ || __arm64__ */
1880#endif /* (DEVELOPMENT || DEBUG) */
fe8ab488 1881
6d2010ae 1882STATIC int
2d21ac55
A
1883sysctl_securelvl
1884(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1885{
1886 int new_value, changed;
1887 int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
1888 if (changed) {
1889 if (!(new_value < securelevel && req->p->p_pid != 1)) {
1890 proc_list_lock();
1891 securelevel = new_value;
1892 proc_list_unlock();
1893 } else {
1894 error = EPERM;
e5568f75 1895 }
2d21ac55
A
1896 }
1897 return(error);
1898}
1899
1900SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
6d2010ae 1901 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55
A
1902 0, 0, sysctl_securelvl, "I", "");
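/*
 * Illustrative userland sketch (not part of this file): the handler above only
 * lets a process other than launchd (pid 1) keep or raise the securelevel, so
 * an attempt to lower it from an ordinary process fails with EPERM. The helper
 * name is made up for the example.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	void
 *	try_lower_securelevel(void)
 *	{
 *		int level, lower;
 *		size_t len = sizeof(level);
 *
 *		if (sysctlbyname("kern.securelevel", &level, &len, NULL, 0) == -1)
 *			return;
 *		lower = level - 1;
 *		if (sysctlbyname("kern.securelevel", NULL, NULL, &lower, sizeof(lower)) == -1)
 *			perror("kern.securelevel");  // expected: EPERM when lowering
 *	}
 */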
1903
1904
6d2010ae 1905STATIC int
2d21ac55
A
1906sysctl_domainname
1907(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1908{
1909 int error, changed;
1910 error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
1911 if (changed) {
1912 domainnamelen = strlen(domainname);
1913 }
1914 return(error);
1915}
1916
1917SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
6d2010ae 1918 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55
A
1919 0, 0, sysctl_domainname, "A", "");
1920
b0d623f7 1921SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
6d2010ae 1922 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2d21ac55
A
1923 &hostid, 0, "");
1924
6d2010ae 1925STATIC int
2d21ac55
A
1926sysctl_hostname
1927(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1928{
1929 int error, changed;
1930 error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
1931 if (changed) {
1932 hostnamelen = req->newlen;
1933 }
1934 return(error);
1935}
1936
1937
1938SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
6d2010ae 1939 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55
A
1940 0, 0, sysctl_hostname, "A", "");
1941
6d2010ae 1942STATIC int
2d21ac55
A
1943sysctl_procname
1944(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1945{
 1946 /* The original code allowed writing; that behavior is preserved here even though
 1947 it makes little sense. Besides, this sysctl is never used. */
1948 return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
1949}
1950
1951SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
6d2010ae 1952 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2d21ac55
A
1953 0, 0, sysctl_procname, "A", "");
1954
1955SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
6d2010ae 1956 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2d21ac55
A
1957 &speculative_reads_disabled, 0, "");
1958
fe8ab488 1959SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
6d2010ae 1960 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
fe8ab488 1961 &preheat_max_bytes, 0, "");
b0d623f7 1962
fe8ab488 1963SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
6d2010ae 1964 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
fe8ab488 1965 &preheat_min_bytes, 0, "");
b0d623f7 1966
6d2010ae
A
1967SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
1968 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1969 &speculative_prefetch_max, 0, "");
b0d623f7 1970
316670eb
A
1971SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
1972 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1973 &speculative_prefetch_max_iosize, 0, "");
1974
6d2010ae
A
1975SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
1976 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1977 &vm_page_free_target, 0, "");
1978
1979SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
1980 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1981 &vm_page_free_min, 0, "");
1982
1983SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
1984 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1985 &vm_page_free_reserved, 0, "");
1986
1987SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
1988 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1989 &vm_page_speculative_percentage, 0, "");
1990
1991SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
1992 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1993 &vm_page_speculative_q_age_ms, 0, "");
1994
1995SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
1996 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1997 &vm_max_delayed_work_limit, 0, "");
1998
1999SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
2000 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2001 &vm_max_batch, 0, "");
2002
39236c6e
A
2003SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
2004 CTLFLAG_RD | CTLFLAG_LOCKED,
2005 &bootsessionuuid_string, sizeof(bootsessionuuid_string) , "");
6d2010ae
A
2006
2007STATIC int
2d21ac55
A
2008sysctl_boottime
2009(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2010{
39037602
A
2011 struct timeval tv;
2012 boottime_timeval(&tv);
b0d623f7 2013 struct proc *p = req->p;
2d21ac55 2014
b0d623f7 2015 if (proc_is64bit(p)) {
5ba3f43e 2016 struct user64_timeval t = {};
39037602
A
2017 t.tv_sec = tv.tv_sec;
2018 t.tv_usec = tv.tv_usec;
b0d623f7
A
2019 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2020 } else {
5ba3f43e 2021 struct user32_timeval t = {};
39037602
A
2022 t.tv_sec = tv.tv_sec;
2023 t.tv_usec = tv.tv_usec;
b0d623f7
A
2024 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2025 }
2d21ac55
A
2026}
2027
2028SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
fe8ab488 2029 CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
2d21ac55
A
2030 0, 0, sysctl_boottime, "S,timeval", "");
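/*
 * Illustrative userland sketch (not part of this file): kern.boottime comes back
 * as a struct timeval sized for the caller's ABI, so a simple uptime calculation
 * looks like this. The helper name is made up for the example.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/time.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int
 *	print_uptime(void)
 *	{
 *		int mib[2] = { CTL_KERN, KERN_BOOTTIME };
 *		struct timeval boottime;
 *		size_t len = sizeof(boottime);
 *
 *		if (sysctl(mib, 2, &boottime, &len, NULL, 0) == -1)
 *			return -1;
 *		printf("up %ld seconds\n", (long)(time(NULL) - boottime.tv_sec));
 *		return 0;
 *	}
 */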
2031
6d2010ae 2032STATIC int
2d21ac55
A
2033sysctl_symfile
2034(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2035{
2036 char *str;
2037 int error = get_kernel_symfile(req->p, &str);
2038 if (error)
2039 return (error);
2040 return sysctl_io_string(req, str, 0, 0, NULL);
2041}
2042
2043
2044SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
6d2010ae 2045 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
2d21ac55
A
2046 0, 0, sysctl_symfile, "A", "");
2047
2048#if NFSCLIENT
6d2010ae 2049STATIC int
2d21ac55
A
2050sysctl_netboot
2051(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2052{
2053 return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
2054}
2055
2056SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
6d2010ae 2057 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2d21ac55
A
2058 0, 0, sysctl_netboot, "I", "");
2059#endif
2060
b7266188 2061#ifdef CONFIG_IMGSRC_ACCESS
6d2010ae
A
2062/*
2063 * Legacy--act as if only one layer of nesting is possible.
2064 */
2065STATIC int
b7266188
A
2066sysctl_imgsrcdev
2067(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2068{
2069 vfs_context_t ctx = vfs_context_current();
2070 vnode_t devvp;
2071 int result;
2072
2073 if (!vfs_context_issuser(ctx)) {
2074 return EPERM;
2075 }
2076
6d2010ae 2077 if (imgsrc_rootvnodes[0] == NULL) {
b7266188
A
2078 return ENOENT;
2079 }
2080
6d2010ae 2081 result = vnode_getwithref(imgsrc_rootvnodes[0]);
b7266188
A
2082 if (result != 0) {
2083 return result;
2084 }
2085
6d2010ae 2086 devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
b7266188
A
2087 result = vnode_getwithref(devvp);
2088 if (result != 0) {
2089 goto out;
2090 }
2091
2092 result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);
2093
2094 vnode_put(devvp);
2095out:
6d2010ae 2096 vnode_put(imgsrc_rootvnodes[0]);
b7266188
A
2097 return result;
2098}
2099
2100SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
6d2010ae 2101 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
b7266188 2102 0, 0, sysctl_imgsrcdev, "I", "");
6d2010ae
A
2103
2104STATIC int
2105sysctl_imgsrcinfo
2106(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2107{
2108 int error;
5ba3f43e 2109 struct imgsrc_info info[MAX_IMAGEBOOT_NESTING] = {}; /* 2 for now, no problem */
6d2010ae
A
2110 uint32_t i;
2111 vnode_t rvp, devvp;
2112
2113 if (imgsrc_rootvnodes[0] == NULLVP) {
2114 return ENXIO;
2115 }
2116
2117 for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
2118 /*
2119 * Go get the root vnode.
2120 */
2121 rvp = imgsrc_rootvnodes[i];
2122 if (rvp == NULLVP) {
2123 break;
2124 }
2125
2126 error = vnode_get(rvp);
2127 if (error != 0) {
2128 return error;
2129 }
2130
2131 /*
2132 * For now, no getting at a non-local volume.
2133 */
2134 devvp = vnode_mount(rvp)->mnt_devvp;
2135 if (devvp == NULL) {
2136 vnode_put(rvp);
2137 return EINVAL;
2138 }
2139
2140 error = vnode_getwithref(devvp);
2141 if (error != 0) {
2142 vnode_put(rvp);
2143 return error;
2144 }
2145
2146 /*
2147 * Fill in info.
2148 */
2149 info[i].ii_dev = vnode_specrdev(devvp);
2150 info[i].ii_flags = 0;
2151 info[i].ii_height = i;
2152 bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));
2153
2154 vnode_put(devvp);
2155 vnode_put(rvp);
2156 }
2157
2158 return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
2159}
2160
2161SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
2162 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
2163 0, 0, sysctl_imgsrcinfo, "I", "");
2164
b7266188
A
2165#endif /* CONFIG_IMGSRC_ACCESS */
2166
39236c6e
A
2167
2168SYSCTL_DECL(_kern_timer);
2169SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");
2170
fe8ab488 2171
39236c6e
A
2172SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
2173 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
6d2010ae
A
2174 &mach_timer_coalescing_enabled, 0, "");
2175
39236c6e
A
2176SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
2177 CTLFLAG_RW | CTLFLAG_LOCKED,
2178 &timer_deadline_tracking_bin_1, "");
2179SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
2180 CTLFLAG_RW | CTLFLAG_LOCKED,
2181 &timer_deadline_tracking_bin_2, "");
2182
2183SYSCTL_DECL(_kern_timer_longterm);
2184SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");
2185
fe8ab488 2186
39236c6e
A
2187/* Must match definition in osfmk/kern/timer_call.c */
2188enum {
2189 THRESHOLD, QCOUNT,
2190 ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
5ba3f43e 2191 LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, PAUSES
39236c6e
A
2192};
2193extern uint64_t timer_sysctl_get(int);
2194extern int timer_sysctl_set(int, uint64_t);
2195
2196STATIC int
2197sysctl_timer
2198(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2199{
2200 int oid = (int)arg1;
2201 uint64_t value = timer_sysctl_get(oid);
2202 uint64_t new_value;
2203 int error;
2204 int changed;
2205
2206 error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
2207 if (changed)
2208 error = timer_sysctl_set(oid, new_value);
2209
2210 return error;
2211}
2212
2213SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
2214 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2215 (void *) THRESHOLD, 0, sysctl_timer, "Q", "");
5ba3f43e
A
2216SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_limit,
2217 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2218 (void *) SCAN_LIMIT, 0, sysctl_timer, "Q", "");
39236c6e
A
2219SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
2220 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2221 (void *) QCOUNT, 0, sysctl_timer, "Q", "");
5ba3f43e 2222#if DEBUG
39236c6e
A
2223SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
2224 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2225 (void *) ENQUEUES, 0, sysctl_timer, "Q", "");
2226SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
2227 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2228 (void *) DEQUEUES, 0, sysctl_timer, "Q", "");
2229SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
2230 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2231 (void *) ESCALATES, 0, sysctl_timer, "Q", "");
2232SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
2233 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2234 (void *) SCANS, 0, sysctl_timer, "Q", "");
2235SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
2236 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2237 (void *) PREEMPTS, 0, sysctl_timer, "Q", "");
2238SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
2239 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2240 (void *) LATENCY, 0, sysctl_timer, "Q", "");
2241SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
2242 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2243 (void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
2244SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
2245 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2246 (void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
5ba3f43e
A
2247SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_pauses,
2248 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2249 (void *) PAUSES, 0, sysctl_timer, "Q", "");
39236c6e
A
2250#endif /* DEBUG */
2251
6d2010ae 2252STATIC int
2d21ac55
A
2253sysctl_usrstack
2254(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2255{
2256 return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
2257}
2258
b0d623f7 2259SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
6d2010ae 2260 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2d21ac55
A
2261 0, 0, sysctl_usrstack, "I", "");
2262
6d2010ae 2263STATIC int
2d21ac55
A
2264sysctl_usrstack64
2265(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2266{
2267 return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
2268}
2269
2270SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
6d2010ae 2271 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2d21ac55
A
2272 0, 0, sysctl_usrstack64, "Q", "");
2273
39037602
A
2274#if CONFIG_COREDUMP
2275
2d21ac55 2276SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
6d2010ae 2277 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2d21ac55
A
2278 corefilename, sizeof(corefilename), "");
2279
6d2010ae 2280STATIC int
2d21ac55
A
2281sysctl_coredump
2282(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2283{
593a1d5f 2284#ifdef SECURE_KERNEL
fe8ab488 2285 (void)req;
593a1d5f 2286 return (ENOTSUP);
fe8ab488 2287#else
2d21ac55
A
2288 int new_value, changed;
2289 int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
2290 if (changed) {
2291 if ((new_value == 0) || (new_value == 1))
2292 do_coredump = new_value;
2293 else
2294 error = EINVAL;
2295 }
2296 return(error);
fe8ab488 2297#endif
2d21ac55
A
2298}
2299
2300SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
6d2010ae 2301 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55
A
2302 0, 0, sysctl_coredump, "I", "");
2303
6d2010ae 2304STATIC int
2d21ac55
A
2305sysctl_suid_coredump
2306(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2307{
593a1d5f 2308#ifdef SECURE_KERNEL
fe8ab488 2309 (void)req;
593a1d5f 2310 return (ENOTSUP);
fe8ab488 2311#else
2d21ac55
A
2312 int new_value, changed;
2313 int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
2314 if (changed) {
2315 if ((new_value == 0) || (new_value == 1))
2316 sugid_coredump = new_value;
55e303ae
A
2317 else
2318 error = EINVAL;
2319 }
2d21ac55 2320 return(error);
fe8ab488 2321#endif
2d21ac55
A
2322}
2323
2324SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
6d2010ae 2325 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55
A
2326 0, 0, sysctl_suid_coredump, "I", "");
2327
39037602
A
2328#endif /* CONFIG_COREDUMP */
2329
6d2010ae 2330STATIC int
2d21ac55
A
2331sysctl_delayterm
2332(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2333{
2334 struct proc *p = req->p;
2335 int new_value, changed;
2336 int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
2337 if (changed) {
2338 proc_lock(p);
2339 if (new_value)
2340 req->p->p_lflag |= P_LDELAYTERM;
2341 else
2342 req->p->p_lflag &= ~P_LDELAYTERM;
2343 proc_unlock(p);
2344 }
2345 return(error);
2346}
2347
2348SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
6d2010ae 2349 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55
A
2350 0, 0, sysctl_delayterm, "I", "");
2351
55e303ae 2352
6d2010ae 2353STATIC int
2d21ac55
A
2354sysctl_rage_vnode
2355(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
55e303ae 2356{
2d21ac55
A
2357 struct proc *p = req->p;
2358 struct uthread *ut;
2359 int new_value, old_value, changed;
2360 int error;
55e303ae 2361
2d21ac55
A
2362 ut = get_bsdthread_info(current_thread());
2363
2364 if (ut->uu_flag & UT_RAGE_VNODES)
2365 old_value = KERN_RAGE_THREAD;
2366 else if (p->p_lflag & P_LRAGE_VNODES)
2367 old_value = KERN_RAGE_PROC;
2368 else
2369 old_value = 0;
2370
2371 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2372
2373 if (error == 0) {
2374 switch (new_value) {
2375 case KERN_RAGE_PROC:
2376 proc_lock(p);
2377 p->p_lflag |= P_LRAGE_VNODES;
2378 proc_unlock(p);
2379 break;
2380 case KERN_UNRAGE_PROC:
2381 proc_lock(p);
2382 p->p_lflag &= ~P_LRAGE_VNODES;
2383 proc_unlock(p);
2384 break;
2385
2386 case KERN_RAGE_THREAD:
2387 ut->uu_flag |= UT_RAGE_VNODES;
2388 break;
2389 case KERN_UNRAGE_THREAD:
2390 ut = get_bsdthread_info(current_thread());
2391 ut->uu_flag &= ~UT_RAGE_VNODES;
2392 break;
e5568f75 2393 }
2d21ac55
A
2394 }
2395 return(error);
2396}
2397
2398SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
6d2010ae 2399 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2d21ac55
A
2400 0, 0, sysctl_rage_vnode, "I", "");
2401
316670eb
A
2402/* XXX move this interface into libproc and remove this sysctl */
2403STATIC int
2404sysctl_setthread_cpupercent
2405(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2406{
2407 int new_value, old_value;
2408 int error = 0;
2409 kern_return_t kret = KERN_SUCCESS;
2410 uint8_t percent = 0;
2411 int ms_refill = 0;
2412
39236c6e
A
2413 if (!req->newptr)
2414 return (0);
2415
316670eb
A
2416 old_value = 0;
2417
2418 if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0)
2419 return (error);
2420
 2421 percent = new_value & 0xff; /* low 8 bits hold the percent */
 2422 ms_refill = (new_value >> 8) & 0xffffff; /* upper 24 bits hold the ms refill value */
2423 if (percent > 100)
2424 return (EINVAL);
2425
2426 /*
2427 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
2428 */
39236c6e 2429 if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0)
316670eb
A
2430 return (EIO);
2431
2432 return (0);
2433}
2434
2435SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
39236c6e 2436 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
316670eb
A
2437 0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
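/*
 * Illustrative userland sketch (not part of this file): the handler above expects
 * the percentage in the low 8 bits and the refill period, in milliseconds, in the
 * upper 24 bits, and applies the limit to the calling thread. The numbers and the
 * helper name below are arbitrary for the example.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	int
 *	limit_current_thread(void)
 *	{
 *		int percent = 50;      // cap this thread at 50% of a CPU
 *		int refill_ms = 100;   // over a 100 ms refill period
 *		int packed = (refill_ms << 8) | (percent & 0xff);
 *
 *		return sysctlbyname("kern.setthread_cpupercent",
 *		    NULL, NULL, &packed, sizeof(packed));
 *	}
 */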
2438
2d21ac55 2439
6d2010ae 2440STATIC int
2d21ac55
A
2441sysctl_kern_check_openevt
2442(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2443{
2444 struct proc *p = req->p;
2445 int new_value, old_value, changed;
2446 int error;
2447
2448 if (p->p_flag & P_CHECKOPENEVT) {
2449 old_value = KERN_OPENEVT_PROC;
2450 } else {
2451 old_value = 0;
2452 }
2453
2454 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2455
2456 if (error == 0) {
2457 switch (new_value) {
2458 case KERN_OPENEVT_PROC:
b0d623f7 2459 OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
2d21ac55
A
2460 break;
2461
2462 case KERN_UNOPENEVT_PROC:
b0d623f7 2463 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
2d21ac55
A
2464 break;
2465
2466 default:
55e303ae 2467 error = EINVAL;
2d21ac55 2468 }
55e303ae 2469 }
2d21ac55
A
2470 return(error);
2471}
2472
6d2010ae 2473SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2d21ac55
A
2474 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2475
2476
2477
6d2010ae 2478STATIC int
2d21ac55
A
2479sysctl_nx
2480(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2481{
4a3eedf9 2482#ifdef SECURE_KERNEL
fe8ab488 2483 (void)req;
4a3eedf9 2484 return ENOTSUP;
fe8ab488 2485#else
2d21ac55
A
2486 int new_value, changed;
2487 int error;
2488
2489 error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
4a3eedf9
A
2490 if (error)
2491 return error;
2d21ac55 2492
4a3eedf9 2493 if (changed) {
b0d623f7 2494#if defined(__i386__) || defined(__x86_64__)
2d21ac55
A
2495 /*
2496 * Only allow setting if NX is supported on the chip
2497 */
2498 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
4a3eedf9 2499 return ENOTSUP;
2d21ac55 2500#endif
4a3eedf9
A
2501 nx_enabled = new_value;
2502 }
2d21ac55 2503 return(error);
fe8ab488 2504#endif /* SECURE_KERNEL */
2d21ac55
A
2505}
2506
2507
2508
2509SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
6d2010ae 2510 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2d21ac55
A
2511 0, 0, sysctl_nx, "I", "");
2512
6d2010ae 2513STATIC int
2d21ac55
A
2514sysctl_loadavg
2515(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2516{
2517 if (proc_is64bit(req->p)) {
5ba3f43e 2518 struct user64_loadavg loadinfo64 = {};
b0d623f7 2519 fill_loadavg64(&averunnable, &loadinfo64);
2d21ac55
A
2520 return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
2521 } else {
5ba3f43e 2522 struct user32_loadavg loadinfo32 = {};
b0d623f7
A
2523 fill_loadavg32(&averunnable, &loadinfo32);
2524 return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
2d21ac55
A
2525 }
2526}
2527
2528SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
6d2010ae 2529 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2d21ac55
A
2530 0, 0, sysctl_loadavg, "S,loadavg", "");
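/*
 * Illustrative userland sketch (not part of this file): vm.loadavg exports the
 * run-queue averages as fixed-point values scaled by fscale; most programs read
 * the same information through getloadavg(3), which hands back plain doubles.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	void
 *	print_load(void)
 *	{
 *		double avg[3];
 *
 *		if (getloadavg(avg, 3) != -1)
 *			printf("load averages: %.2f %.2f %.2f\n", avg[0], avg[1], avg[2]);
 *	}
 */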
2531
6d2010ae
A
2532/*
2533 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2534 */
2535STATIC int
2536sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
2537 __unused int arg2, struct sysctl_req *req)
2538{
2539 int old_value=0, new_value=0, error=0;
2540
2541 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value ))
2542 return(error);
2543 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
2544 if (!error) {
2545 return (vm_toggle_entry_reuse(new_value, NULL));
2546 }
2547 return(error);
2548}
2549
2550SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse,"I","");
2551
39037602 2552
6d2010ae 2553STATIC int
2d21ac55
A
2554sysctl_swapusage
2555(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2556{
2557 int error;
2558 uint64_t swap_total;
2559 uint64_t swap_avail;
b0d623f7 2560 vm_size_t swap_pagesize;
2d21ac55 2561 boolean_t swap_encrypted;
5ba3f43e 2562 struct xsw_usage xsu = {};
2d21ac55
A
2563
2564 error = macx_swapinfo(&swap_total,
2565 &swap_avail,
2566 &swap_pagesize,
2567 &swap_encrypted);
2568 if (error)
2569 return error;
2570
2571 xsu.xsu_total = swap_total;
2572 xsu.xsu_avail = swap_avail;
2573 xsu.xsu_used = swap_total - swap_avail;
2574 xsu.xsu_pagesize = swap_pagesize;
2575 xsu.xsu_encrypted = swap_encrypted;
2576 return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
2577}
2578
2579
2580
2581SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
6d2010ae 2582 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2d21ac55
A
2583 0, 0, sysctl_swapusage, "S,xsw_usage", "");
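/*
 * Illustrative userland sketch (not part of this file): reading the xsw_usage
 * structure that the handler above fills in; struct xsw_usage is declared in
 * <sys/sysctl.h>. The helper name is made up for the example.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	print_swapusage(void)
 *	{
 *		struct xsw_usage xsu;
 *		size_t len = sizeof(xsu);
 *
 *		if (sysctlbyname("vm.swapusage", &xsu, &len, NULL, 0) == -1)
 *			return -1;
 *		printf("swap: %llu of %llu bytes used\n",
 *		    (unsigned long long)xsu.xsu_used,
 *		    (unsigned long long)xsu.xsu_total);
 *		return 0;
 *	}
 */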
2584
6d2010ae
A
2585#if CONFIG_FREEZE
2586extern void vm_page_reactivate_all_throttled(void);
2587
2588static int
2589sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
2590{
2591#pragma unused(arg1, arg2)
316670eb 2592 int error, val = memorystatus_freeze_enabled ? 1 : 0;
6d2010ae
A
2593 boolean_t disabled;
2594
2595 error = sysctl_handle_int(oidp, &val, 0, req);
2596 if (error || !req->newptr)
2597 return (error);
39037602
A
2598
2599 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
39236c6e 2600 //assert(req->newptr);
fe8ab488 2601 printf("Failed attempt to set vm.freeze_enabled sysctl\n");
39236c6e
A
2602 return EINVAL;
2603 }
2604
6d2010ae
A
2605 /*
2606 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
2607 */
316670eb 2608 disabled = (!val && memorystatus_freeze_enabled);
6d2010ae 2609
316670eb 2610 memorystatus_freeze_enabled = val ? TRUE : FALSE;
6d2010ae
A
2611
2612 if (disabled) {
2613 vm_page_reactivate_all_throttled();
2614 }
2615
2616 return (0);
2617}
2618
316670eb 2619SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
6d2010ae 2620#endif /* CONFIG_FREEZE */
2d21ac55
A
2621
2622/* this kernel does NOT implement shared_region_make_private_np() */
2623SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
6d2010ae 2624 CTLFLAG_RD | CTLFLAG_LOCKED,
b0d623f7 2625 (int *)NULL, 0, "");
0c530ab8 2626
6d2010ae 2627STATIC int
0c530ab8 2628fetch_process_cputype(
2d21ac55 2629 proc_t cur_proc,
0c530ab8
A
2630 int *name,
2631 u_int namelen,
2632 cpu_type_t *cputype)
2633{
2d21ac55
A
2634 proc_t p = PROC_NULL;
2635 int refheld = 0;
0c530ab8 2636 cpu_type_t ret = 0;
2d21ac55 2637 int error = 0;
0c530ab8
A
2638
2639 if (namelen == 0)
2640 p = cur_proc;
2641 else if (namelen == 1) {
2d21ac55 2642 p = proc_find(name[0]);
0c530ab8
A
2643 if (p == NULL)
2644 return (EINVAL);
2d21ac55 2645 refheld = 1;
0c530ab8 2646 } else {
2d21ac55
A
2647 error = EINVAL;
2648 goto out;
0c530ab8
A
2649 }
2650
fe8ab488
A
2651 ret = cpu_type() & ~CPU_ARCH_MASK;
2652 if (IS_64BIT_PROCESS(p))
2653 ret |= CPU_ARCH_ABI64;
2654
0c530ab8
A
2655 *cputype = ret;
2656
2d21ac55
A
2657 if (refheld != 0)
2658 proc_rele(p);
2659out:
2660 return (error);
0c530ab8
A
2661}
2662
6d2010ae 2663STATIC int
2d21ac55
A
2664sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2665 struct sysctl_req *req)
0c530ab8
A
2666{
2667 int error;
2668 cpu_type_t proc_cputype = 0;
2669 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2670 return error;
2671 int res = 1;
2672 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
2673 res = 0;
2674 return SYSCTL_OUT(req, &res, sizeof(res));
2675}
6d2010ae 2676SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native ,"I","proc_native");
0c530ab8 2677
6d2010ae 2678STATIC int
2d21ac55
A
2679sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2680 struct sysctl_req *req)
0c530ab8
A
2681{
2682 int error;
2683 cpu_type_t proc_cputype = 0;
2684 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2685 return error;
2686 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
2687}
6d2010ae 2688SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
0c530ab8 2689
6d2010ae 2690STATIC int
2d21ac55
A
2691sysctl_safeboot
2692(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2693{
2694 return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
2695}
2696
2697SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
6d2010ae 2698 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2d21ac55
A
2699 0, 0, sysctl_safeboot, "I", "");
2700
6d2010ae 2701STATIC int
2d21ac55
A
2702sysctl_singleuser
2703(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2704{
2705 return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
2706}
2707
2708SYSCTL_PROC(_kern, OID_AUTO, singleuser,
6d2010ae 2709 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2d21ac55
A
2710 0, 0, sysctl_singleuser, "I", "");
2711
3e170ce0
A
2712STATIC int sysctl_minimalboot
2713(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2714{
2715 return sysctl_io_number(req, minimalboot, sizeof(int), NULL, NULL);
2716}
2717
2718SYSCTL_PROC(_kern, OID_AUTO, minimalboot,
2719 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2720 0, 0, sysctl_minimalboot, "I", "");
2721
2d21ac55
A
2722/*
2723 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
2724 */
2725extern boolean_t affinity_sets_enabled;
2726extern int affinity_sets_mapping;
2727
2728SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
6d2010ae 2729 CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
2d21ac55 2730SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
6d2010ae 2731 CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
2d21ac55 2732
316670eb
A
2733/*
2734 * Boolean indicating if KASLR is active.
2735 */
2736STATIC int
2737sysctl_slide
2738(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2739{
2740 uint32_t slide;
2741
2742 slide = vm_kernel_slide ? 1 : 0;
2743
2744 return sysctl_io_number( req, slide, sizeof(int), NULL, NULL);
2745}
2746
2747SYSCTL_PROC(_kern, OID_AUTO, slide,
2748 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2749 0, 0, sysctl_slide, "I", "");
2750
2d21ac55
A
2751/*
2752 * Limit on total memory users can wire.
2753 *
2754 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
2755 *
2756 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
2757 *
2758 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
2759 * kmem_init().
2760 *
2761 * All values are in bytes.
2762 */
2763
b0d623f7 2764vm_map_size_t vm_global_no_user_wire_amount;
2d21ac55
A
2765vm_map_size_t vm_global_user_wire_limit;
2766vm_map_size_t vm_user_wire_limit;
2767
b0d623f7
A
2768/*
2769 * There needs to be a more automatic/elegant way to do this
2770 */
5ba3f43e
A
2771#if defined(__ARM__)
2772SYSCTL_INT(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, 0, "");
2773SYSCTL_INT(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, 0, "");
2774SYSCTL_INT(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, 0, "");
2775#else
6d2010ae
A
2776SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
2777SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
2778SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
5ba3f43e 2779#endif
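/*
 * Illustrative userland sketch (not part of this file): on the non-ARM
 * configurations above these limits are exported as 64-bit quads, so a read
 * looks like this. The helper name is made up for the example.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int
 *	print_wire_limit(void)
 *	{
 *		uint64_t limit;
 *		size_t len = sizeof(limit);
 *
 *		if (sysctlbyname("vm.global_user_wire_limit", &limit, &len, NULL, 0) == -1)
 *			return -1;
 *		printf("vm.global_user_wire_limit = %llu bytes\n",
 *		    (unsigned long long)limit);
 *		return 0;
 *	}
 */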
b0d623f7 2780
e2d2fc5c
A
2781extern int vm_map_copy_overwrite_aligned_src_not_internal;
2782extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
2783extern int vm_map_copy_overwrite_aligned_src_large;
2784SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
2785SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
2786SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");
b0d623f7
A
2787
2788
39236c6e
A
2789extern uint32_t vm_page_external_count;
2790extern uint32_t vm_page_filecache_min;
2791
2792SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");
2793SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_filecache_min, 0, "");
2794
2795extern int vm_compressor_mode;
04b8595b 2796extern int vm_compressor_is_active;
3e170ce0
A
2797extern int vm_compressor_available;
2798extern uint32_t vm_ripe_target_age;
39236c6e
A
2799extern uint32_t swapout_target_age;
2800extern int64_t compressor_bytes_used;
3e170ce0
A
2801extern int64_t c_segment_input_bytes;
2802extern int64_t c_segment_compressed_bytes;
39236c6e
A
2803extern uint32_t compressor_eval_period_in_msecs;
2804extern uint32_t compressor_sample_min_in_msecs;
2805extern uint32_t compressor_sample_max_in_msecs;
2806extern uint32_t compressor_thrashing_threshold_per_10msecs;
2807extern uint32_t compressor_thrashing_min_per_10msecs;
2808extern uint32_t vm_compressor_minorcompact_threshold_divisor;
2809extern uint32_t vm_compressor_majorcompact_threshold_divisor;
2810extern uint32_t vm_compressor_unthrottle_threshold_divisor;
2811extern uint32_t vm_compressor_catchup_threshold_divisor;
39037602 2812extern uint32_t vm_compressor_time_thread;
5ba3f43e
A
2813#if DEVELOPMENT || DEBUG
2814extern vmct_stats_t vmct_stats;
2815#endif
39236c6e 2816
3e170ce0
A
2817SYSCTL_QUAD(_vm, OID_AUTO, compressor_input_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_input_bytes, "");
2818SYSCTL_QUAD(_vm, OID_AUTO, compressor_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_compressed_bytes, "");
2819SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");
2820
39236c6e 2821SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
04b8595b 2822SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
39236c6e 2823SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
3e170ce0
A
2824SYSCTL_INT(_vm, OID_AUTO, compressor_available, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_available, 0, "");
2825
2826SYSCTL_INT(_vm, OID_AUTO, vm_ripe_target_age_in_secs, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ripe_target_age, 0, "");
39236c6e
A
2827
2828SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
2829SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
2830SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
2831SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
2832SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");
2833SYSCTL_INT(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_minorcompact_threshold_divisor, 0, "");
2834SYSCTL_INT(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_majorcompact_threshold_divisor, 0, "");
2835SYSCTL_INT(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_unthrottle_threshold_divisor, 0, "");
2836SYSCTL_INT(_vm, OID_AUTO, compressor_catchup_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_catchup_threshold_divisor, 0, "");
2837
fe8ab488
A
2838SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");
2839
39037602 2840SYSCTL_INT(_vm, OID_AUTO, compressor_timing_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_time_thread, 0, "");
5ba3f43e
A
2841
2842#if DEVELOPMENT || DEBUG
2843SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[0], "");
2844SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[1], "");
2845
2846SYSCTL_QUAD(_vm, OID_AUTO, compressor_threads_total, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_cthreads_total, "");
2847
2848SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[0], "");
2849SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[1], "");
2850
2851SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[0], "");
2852SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[1], "");
2853
2854SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[0], 0, "");
2855SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[1], 0, "");
2856
2857SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[0], 0, "");
2858SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[1], 0, "");
2859
2860#endif
39037602
A
2861
2862SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressions, "");
2863SYSCTL_QUAD(_vm, OID_AUTO, lz4_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compression_failures, "");
2864SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressed_bytes, "");
2865SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_delta, "");
2866SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_negative_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_negative_delta, "");
2867
2868SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressions, "");
2869SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressed_bytes, "");
2870
2871SYSCTL_QUAD(_vm, OID_AUTO, uc_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.uc_decompressions, "");
2872
2873SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions, "");
5ba3f43e
A
2874
2875SYSCTL_QUAD(_vm, OID_AUTO, wk_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_cabstime, "");
2876
2877SYSCTL_QUAD(_vm, OID_AUTO, wkh_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_cabstime, "");
2878SYSCTL_QUAD(_vm, OID_AUTO, wkh_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_compressions, "");
2879
2880SYSCTL_QUAD(_vm, OID_AUTO, wks_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_cabstime, "");
2881SYSCTL_QUAD(_vm, OID_AUTO, wks_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressions, "");
2882
39037602
A
2883SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions_exclusive, "");
2884SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_compressions, "");
2885SYSCTL_QUAD(_vm, OID_AUTO, wk_mzv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_mzv_compressions, "");
2886SYSCTL_QUAD(_vm, OID_AUTO, wk_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compression_failures, "");
2887SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_exclusive, "");
2888SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_total, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_total, "");
2889
5ba3f43e
A
2890SYSCTL_QUAD(_vm, OID_AUTO, wks_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressed_bytes, "");
2891SYSCTL_QUAD(_vm, OID_AUTO, wks_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compression_failures, "");
2892SYSCTL_QUAD(_vm, OID_AUTO, wks_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_sv_compressions, "");
2893
2894
39037602 2895SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressions, "");
5ba3f43e
A
2896
2897SYSCTL_QUAD(_vm, OID_AUTO, wk_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_dabstime, "");
2898
2899SYSCTL_QUAD(_vm, OID_AUTO, wkh_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_dabstime, "");
2900SYSCTL_QUAD(_vm, OID_AUTO, wkh_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_decompressions, "");
2901
2902SYSCTL_QUAD(_vm, OID_AUTO, wks_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_dabstime, "");
2903SYSCTL_QUAD(_vm, OID_AUTO, wks_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_decompressions, "");
2904
39037602
A
2905SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressed_bytes, "");
2906SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_decompressions, "");
2907
2908SYSCTL_INT(_vm, OID_AUTO, lz4_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_threshold, 0, "");
2909SYSCTL_INT(_vm, OID_AUTO, wkdm_reeval_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.wkdm_reeval_threshold, 0, "");
2910SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_skips, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_skips, 0, "");
2911SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_run_length, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_run_length, 0, "");
2912SYSCTL_INT(_vm, OID_AUTO, lz4_max_preselects, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_preselects, 0, "");
2913SYSCTL_INT(_vm, OID_AUTO, lz4_run_preselection_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_preselection_threshold, 0, "");
2914SYSCTL_INT(_vm, OID_AUTO, lz4_run_continue_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_continue_bytes, 0, "");
2915SYSCTL_INT(_vm, OID_AUTO, lz4_profitable_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_profitable_bytes, 0, "");
5ba3f43e
A
2916#if DEVELOPMENT || DEBUG
2917extern int vm_compressor_current_codec;
2918extern int vm_compressor_test_seg_wp;
2919extern boolean_t vm_compressor_force_sw_wkdm;
2920SYSCTL_INT(_vm, OID_AUTO, compressor_codec, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_current_codec, 0, "");
2921SYSCTL_INT(_vm, OID_AUTO, compressor_test_wp, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_test_seg_wp, 0, "");
2922
2923SYSCTL_INT(_vm, OID_AUTO, wksw_force, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_force_sw_wkdm, 0, "");
2924extern int precompy, wkswhw;
2925
2926SYSCTL_INT(_vm, OID_AUTO, precompy, CTLFLAG_RW | CTLFLAG_LOCKED, &precompy, 0, "");
2927SYSCTL_INT(_vm, OID_AUTO, wkswhw, CTLFLAG_RW | CTLFLAG_LOCKED, &wkswhw, 0, "");
2928extern unsigned int vm_ktrace_enabled;
2929SYSCTL_INT(_vm, OID_AUTO, vm_ktrace, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ktrace_enabled, 0, "");
2930#endif
39037602 2931
fe8ab488
A
2932#if CONFIG_PHANTOM_CACHE
2933extern uint32_t phantom_cache_thrashing_threshold;
2934extern uint32_t phantom_cache_eval_period_in_msecs;
2935extern uint32_t phantom_cache_thrashing_threshold_ssd;
2936
2937
2938SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
2939SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
2940SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
2941#endif
2942
39037602
A
2943#if CONFIG_BACKGROUND_QUEUE
2944
2945extern uint32_t vm_page_background_count;
39037602
A
2946extern uint32_t vm_page_background_target;
2947extern uint32_t vm_page_background_internal_count;
2948extern uint32_t vm_page_background_external_count;
2949extern uint32_t vm_page_background_mode;
2950extern uint32_t vm_page_background_exclude_external;
2951extern uint64_t vm_page_background_promoted_count;
2952extern uint64_t vm_pageout_considered_bq_internal;
2953extern uint64_t vm_pageout_considered_bq_external;
2954extern uint64_t vm_pageout_rejected_bq_internal;
2955extern uint64_t vm_pageout_rejected_bq_external;
2956
2957SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
2958SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
2959SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
2960SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
2961SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
2962SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");
2963
2964SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
2965SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_internal, "");
2966SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_external, "");
2967SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
2968SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");
2969
2970#endif
2971
2972#if (DEVELOPMENT || DEBUG)
2973
2974SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
2975 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2976 &vm_page_creation_throttled_hard, 0, "");
2977
2978SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
2979 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2980 &vm_page_creation_throttled_soft, 0, "");
04b8595b 2981
2982extern uint32_t vm_pageout_memorystatus_fb_factor_nr;
2983extern uint32_t vm_pageout_memorystatus_fb_factor_dr;
2984SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_nr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_nr, 0, "");
2985SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_dr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_dr, 0, "");
2986
2987extern uint32_t vm_grab_anon_overrides;
2988extern uint32_t vm_grab_anon_nops;
2989
2990SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_overrides, 0, "");
2991SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_nops, 0, "");
2992
2993/* log message counters for persistence mode */
2994extern uint32_t oslog_p_total_msgcount;
2995extern uint32_t oslog_p_metadata_saved_msgcount;
2996extern uint32_t oslog_p_metadata_dropped_msgcount;
2997extern uint32_t oslog_p_error_count;
2998extern uint32_t oslog_p_saved_msgcount;
2999extern uint32_t oslog_p_dropped_msgcount;
3000extern uint32_t oslog_p_boot_dropped_msgcount;
3001
3002/* log message counters for streaming mode */
3003extern uint32_t oslog_s_total_msgcount;
3004extern uint32_t oslog_s_metadata_msgcount;
3005extern uint32_t oslog_s_error_count;
3006extern uint32_t oslog_s_streamed_msgcount;
3007extern uint32_t oslog_s_dropped_msgcount;
3008
3009SYSCTL_UINT(_debug, OID_AUTO, oslog_p_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_total_msgcount, 0, "");
3010SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_saved_msgcount, 0, "");
3011SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_dropped_msgcount, 0, "");
3012SYSCTL_UINT(_debug, OID_AUTO, oslog_p_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_error_count, 0, "");
3013SYSCTL_UINT(_debug, OID_AUTO, oslog_p_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_saved_msgcount, 0, "");
3014SYSCTL_UINT(_debug, OID_AUTO, oslog_p_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_dropped_msgcount, 0, "");
3015SYSCTL_UINT(_debug, OID_AUTO, oslog_p_boot_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_boot_dropped_msgcount, 0, "");
3016
3017SYSCTL_UINT(_debug, OID_AUTO, oslog_s_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_total_msgcount, 0, "");
3018SYSCTL_UINT(_debug, OID_AUTO, oslog_s_metadata_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_metadata_msgcount, 0, "");
3019SYSCTL_UINT(_debug, OID_AUTO, oslog_s_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_error_count, 0, "");
3020SYSCTL_UINT(_debug, OID_AUTO, oslog_s_streamed_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_streamed_msgcount, 0, "");
3021SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_dropped_msgcount, 0, "");
3022
3023
3024#endif /* DEVELOPMENT || DEBUG */
3025
b0d623f7 3026/*
fe8ab488 3027 * Enable tracing of voucher contents
b0d623f7 3028 */
fe8ab488 3029extern uint32_t ipc_voucher_trace_contents;
b0d623f7 3030
3031SYSCTL_INT (_kern, OID_AUTO, ipc_voucher_trace_contents,
3032 CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");
3033
3034/*
3035 * Kernel stack size and depth
3036 */
3037SYSCTL_INT (_kern, OID_AUTO, stack_size,
6d2010ae 3038 CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
b0d623f7 3039SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
6d2010ae 3040 CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
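
/*
 * Illustrative sketch only, kept out of the build with #if 0 (not part of
 * this file): a userspace program that reads the kern.stack_size and
 * kern.stack_depth_max values registered above via sysctlbyname(3).
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        unsigned int stack_size = 0, depth_max = 0;
        size_t len = sizeof(stack_size);

        /* kern.stack_size: kernel stack size, in bytes */
        if (sysctlbyname("kern.stack_size", &stack_size, &len, NULL, 0) == 0)
                printf("kernel stack size: %u bytes\n", stack_size);

        /* kern.stack_depth_max: deepest stack observed at interrupt or context switch */
        len = sizeof(depth_max);
        if (sysctlbyname("kern.stack_depth_max", &depth_max, &len, NULL, 0) == 0)
                printf("max kernel stack depth: %u bytes\n", depth_max);

        return 0;
}
#endif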
b0d623f7 3041
3042extern unsigned int kern_feature_overrides;
3043SYSCTL_INT (_kern, OID_AUTO, kern_feature_overrides,
3044 CTLFLAG_RD | CTLFLAG_LOCKED, &kern_feature_overrides, 0, "Kernel feature override mask");
3045
3046/*
3047 * Enable backtraces for port allocations
3048 */
3049extern int ipc_portbt;
3050
3051SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
6d2010ae 3052 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
3053 &ipc_portbt, 0, "");
3054
3055/*
3056 * Scheduler sysctls
3057 */
3058
3059SYSCTL_STRING(_kern, OID_AUTO, sched,
3060 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
3061 sched_string, sizeof(sched_string),
3062 "Timeshare scheduler implementation");
3063
3064/*
3065 * Only support runtime modification on embedded platforms
3066 * with development config enabled
3067 */
3068#if CONFIG_EMBEDDED
3069#if !SECURE_KERNEL
3070extern int precise_user_kernel_time;
3071SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time,
3072 CTLFLAG_RW | CTLFLAG_LOCKED,
3073 &precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time");
3074#endif
3075#endif
3076
3077
3078/* Parameters related to timer coalescing tuning, to be replaced
3079 * with a dedicated system call in the future.
3080 */
3081/* Enable processing of pending timers in the context of any other interrupt.
3082 * Coalescing tuning parameters for various thread/task attributes follow. */
3083STATIC int
3084sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
3085{
3086#pragma unused(oidp)
3087 int size = arg2; /* size of the backing value, in bytes */
3088 int error;
3089 int changed = 0;
3090 uint64_t old_value_ns;
3091 uint64_t new_value_ns;
3092 uint64_t value_abstime;
3093 if (size == sizeof(uint32_t))
3094 value_abstime = *((uint32_t *)arg1);
3095 else if (size == sizeof(uint64_t))
3096 value_abstime = *((uint64_t *)arg1);
3097 else return ENOTSUP;
3098
3099 absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
3100 error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
3101 if ((error) || (!changed))
3102 return error;
3103
3104 nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
3105 if (size == sizeof(uint32_t))
3106 *((uint32_t *)arg1) = (uint32_t)value_abstime;
3107 else
3108 *((uint64_t *)arg1) = value_abstime;
3109 return error;
3110}
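
/*
 * Illustrative sketch only, kept out of the build with #if 0 (not part of
 * this file): the handler above stores values as Mach absolute time but
 * exposes them to callers in nanoseconds, so a userspace reader/writer of
 * the *_ns_max knobs registered below (e.g. kern.timer_coalesce_bg_ns_max)
 * deals purely in nanoseconds. The 5 ms value is an arbitrary example, not
 * a recommended setting.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t cur_ns = 0;
        size_t len = sizeof(cur_ns);

        /* Read the current background-timer coalescing window, in nanoseconds. */
        if (sysctlbyname("kern.timer_coalesce_bg_ns_max", &cur_ns, &len, NULL, 0) == 0)
                printf("bg coalescing max: %llu ns\n", (unsigned long long)cur_ns);

        /* Write a new window; the kernel converts it back to absolute time. */
        uint64_t new_ns = 5000000ULL; /* 5 ms, arbitrary example; needs privilege */
        if (sysctlbyname("kern.timer_coalesce_bg_ns_max", NULL, NULL, &new_ns, sizeof(new_ns)) != 0)
                perror("kern.timer_coalesce_bg_ns_max");

        return 0;
}
#endif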
3111
3112SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
3113 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3114 &tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
3115SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
3116 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3117 &tcoal_prio_params.timer_resort_threshold_abstime,
3118 sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
3119 sysctl_timer_user_us_kernel_abstime,
3120 "Q", "");
3121SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
3122 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3123 &tcoal_prio_params.timer_coalesce_bg_abstime_max,
3124 sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
3125 sysctl_timer_user_us_kernel_abstime,
3126 "Q", "");
3127
3128SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
3129 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3130 &tcoal_prio_params.timer_coalesce_kt_shift, 0, "");
3131
3132SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
3133 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3134 &tcoal_prio_params.timer_coalesce_kt_abstime_max,
3135 sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
3136 sysctl_timer_user_us_kernel_abstime,
3137 "Q", "");
3138
3139SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
3140 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3141 &tcoal_prio_params.timer_coalesce_fp_shift, 0, "");
3142
3143SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
3144 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3145 &tcoal_prio_params.timer_coalesce_fp_abstime_max,
3146 sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
3147 sysctl_timer_user_us_kernel_abstime,
3148 "Q", "");
3149
3150SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
3151 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3152 &tcoal_prio_params.timer_coalesce_ts_shift, 0, "");
3153
3154SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
3155 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3156 &tcoal_prio_params.timer_coalesce_ts_abstime_max,
3157 sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
3158 sysctl_timer_user_us_kernel_abstime,
3159 "Q", "");
3160
3161SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
3162 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3163 &tcoal_prio_params.latency_qos_scale[0], 0, "");
3164
3165SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
3166 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3167 &tcoal_prio_params.latency_qos_abstime_max[0],
3168 sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
3169 sysctl_timer_user_us_kernel_abstime,
3170 "Q", "");
3171
3172SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
3173 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3174 &tcoal_prio_params.latency_qos_scale[1], 0, "");
3175
3176SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
3177 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3178 &tcoal_prio_params.latency_qos_abstime_max[1],
3179 sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
3180 sysctl_timer_user_us_kernel_abstime,
3181 "Q", "");
3182
3183SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
3184 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3185 &tcoal_prio_params.latency_qos_scale[2], 0, "");
3186
3187SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
3188 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3189 &tcoal_prio_params.latency_qos_abstime_max[2],
3190 sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
3191 sysctl_timer_user_us_kernel_abstime,
3192 "Q", "");
3193
3194SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
3195 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3196 &tcoal_prio_params.latency_qos_scale[3], 0, "");
3197
3198SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
3199 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3200 &tcoal_prio_params.latency_qos_abstime_max[3],
3201 sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
3202 sysctl_timer_user_us_kernel_abstime,
3203 "Q", "");
3204
3205SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
3206 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3207 &tcoal_prio_params.latency_qos_scale[4], 0, "");
3208
3209SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
3210 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3211 &tcoal_prio_params.latency_qos_abstime_max[4],
3212 sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
3213 sysctl_timer_user_us_kernel_abstime,
3214 "Q", "");
3215
3216SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
3217 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3218 &tcoal_prio_params.latency_qos_scale[5], 0, "");
3219
3220SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
3221 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3222 &tcoal_prio_params.latency_qos_abstime_max[5],
3223 sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
3224 sysctl_timer_user_us_kernel_abstime,
3225 "Q", "");
3226
3227/* Communicate the "user idle level" heuristic to the timer layer, and
3228 * potentially other layers in the future.
3229 */
3230
3231static int
3232timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
3233 int new_value = 0, old_value = 0, changed = 0, error;
3234
3235 old_value = timer_get_user_idle_level();
3236
3237 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
3238
3239 if (error == 0 && changed) {
3240 if (timer_set_user_idle_level(new_value) != KERN_SUCCESS)
3241 error = ERANGE;
3242 }
3243
3244 return error;
3245}
3246
3247SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level,
3248 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
3249 0, 0,
3250 timer_user_idle_level, "I", "User idle level heuristic, 0-128");
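
/*
 * Illustrative sketch only, kept out of the build with #if 0 (not part of
 * this file): reading and setting machdep.user_idle_level from userspace.
 * Per the handler above, values rejected by timer_set_user_idle_level()
 * come back as ERANGE; the OID description suggests a 0-128 range.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int level = 0;
        size_t len = sizeof(level);

        if (sysctlbyname("machdep.user_idle_level", &level, &len, NULL, 0) == 0)
                printf("current user idle level: %d\n", level);

        int new_level = 0; /* arbitrary example value within 0-128 */
        if (sysctlbyname("machdep.user_idle_level", NULL, NULL, &new_level, sizeof(new_level)) != 0)
                perror("machdep.user_idle_level");

        return 0;
}
#endif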
3251
3252#if HYPERVISOR
3253SYSCTL_INT(_kern, OID_AUTO, hv_support,
3254 CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
3255 &hv_support_available, 0, "");
3256#endif
3e170ce0 3257
3258#if CONFIG_EMBEDDED
3259STATIC int
3260sysctl_darkboot SYSCTL_HANDLER_ARGS
3261{
3262 int err = 0, value = 0;
3263#pragma unused(oidp, arg1, arg2, err, value, req)
3264
3265 /*
3266 * Handle the sysctl request.
3267 *
3268 * If this is a read, the function will set the value to the current darkboot value. Otherwise,
3269 * we'll get the request identifier into "value" and then we can honor it.
3270 */
3271 if ((err = sysctl_io_number(req, darkboot, sizeof(int), &value, NULL)) != 0) {
3272 goto exit;
3273 }
3274
3275 /* writing requested, let's process the request */
3276 if (req->newptr) {
3277 /* writing is protected by an entitlement */
3278 if (priv_check_cred(kauth_cred_get(), PRIV_DARKBOOT, 0) != 0) {
3279 err = EPERM;
3280 goto exit;
3281 }
3282
3283 switch (value) {
3284 case MEMORY_MAINTENANCE_DARK_BOOT_UNSET:
3285 /*
3286 * If the darkboot sysctl is unset, the NVRAM variable
3287 * must be unset too. If that's not the case, it means
3288 * someone is doing something crazy and unsupported.
3289 */
3290 if (darkboot != 0) {
3291 int ret = PERemoveNVRAMProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME);
3292 if (ret) {
3293 darkboot = 0;
3294 } else {
3295 err = EINVAL;
3296 }
3297 }
3298 break;
3299 case MEMORY_MAINTENANCE_DARK_BOOT_SET:
3300 darkboot = 1;
3301 break;
3302 case MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT: {
3303 /*
3304 * Set the NVRAM and update 'darkboot' in case
3305 * of success. Otherwise, do not update
3306 * 'darkboot' and report the failure.
3307 */
3308 if (PEWriteNVRAMBooleanProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME, TRUE)) {
3309 darkboot = 1;
3310 } else {
3311 err = EINVAL;
3312 }
3313
3314 break;
3315 }
3316 default:
3317 err = EINVAL;
3318 }
3319 }
3320
3321exit:
3322 return err;
3323}
3324
3325SYSCTL_PROC(_kern, OID_AUTO, darkboot,
3326 CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
3327 0, 0, sysctl_darkboot, "I", "");
3328#endif
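
/*
 * Illustrative sketch only, kept out of the build with #if 0 (not part of
 * this file): on CONFIG_EMBEDDED kernels any caller may read kern.darkboot
 * (the handler above returns the current 'darkboot' value); writes also
 * require the PRIV_DARKBOOT privilege and one of the kernel-side
 * MEMORY_MAINTENANCE_DARK_BOOT_* request values, so only the read side is
 * shown here.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int darkboot_value = 0;
        size_t len = sizeof(darkboot_value);

        if (sysctlbyname("kern.darkboot", &darkboot_value, &len, NULL, 0) == 0)
                printf("darkboot: %d\n", darkboot_value);
        else
                perror("kern.darkboot"); /* absent on non-embedded kernels */

        return 0;
}
#endif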
3e170ce0 3329
3330/*
3331 * This is set by core audio to tell tailspin (i.e. background tracing) how long
3332 * its smallest buffer is. Background tracing can then try to make reasonable
3333 * decisions to avoid introducing so much latency that the buffers will
3334 * underflow.
3335 */
3336
3337int min_audio_buffer_usec;
3338
3339STATIC int
3340sysctl_audio_buffer SYSCTL_HANDLER_ARGS
3341{
3342#pragma unused(oidp, arg1, arg2)
3343 int err = 0, value = 0, changed = 0;
3344 err = sysctl_io_number(req, min_audio_buffer_usec, sizeof(int), &value, &changed);
3345 if (err) goto exit;
3346
3347 if (changed) {
3348 /* writing is protected by an entitlement */
3349 if (priv_check_cred(kauth_cred_get(), PRIV_AUDIO_LATENCY, 0) != 0) {
3350 err = EPERM;
3351 goto exit;
3352 }
3353 min_audio_buffer_usec = value;
3354 }
3355exit:
3356 return err;
3357}
3358
3359SYSCTL_PROC(_kern, OID_AUTO, min_audio_buffer_usec, CTLFLAG_RW | CTLFLAG_ANYBODY, 0, 0, sysctl_audio_buffer, "I", "Minimum audio buffer size, in microseconds");
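
/*
 * Illustrative sketch only, kept out of the build with #if 0 (not part of
 * this file): reading and setting kern.min_audio_buffer_usec. Per the
 * handler above, writes are gated on the PRIV_AUDIO_LATENCY privilege, so
 * the write below fails with EPERM for ordinary callers. The 10 ms value is
 * an arbitrary example.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int usec = 0;
        size_t len = sizeof(usec);

        if (sysctlbyname("kern.min_audio_buffer_usec", &usec, &len, NULL, 0) == 0)
                printf("minimum audio buffer: %d usec\n", usec);

        int new_usec = 10000; /* 10 ms, arbitrary example */
        if (sysctlbyname("kern.min_audio_buffer_usec", NULL, NULL, &new_usec, sizeof(new_usec)) != 0)
                perror("kern.min_audio_buffer_usec");

        return 0;
}
#endif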
3360
3361#if DEVELOPMENT || DEBUG
3362#include <sys/sysent.h>
3363/* This should result in a fatal exception, verifying that "sysent" is
3364 * write-protected.
3365 */
3366static int
3367kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
3368 uint64_t new_value = 0, old_value = 0;
3369 int changed = 0, error;
3370
3371 error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
3372 if ((error == 0) && changed) {
3373 volatile uint32_t *wraddr = (uint32_t *) &sysent[0];
3374 *wraddr = 0;
3375 printf("sysent[0] write succeeded\n");
3376 }
3377 return error;
3378}
3379
3380SYSCTL_PROC(_kern, OID_AUTO, sysent_const_check,
3381 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3382 0, 0,
3383 kern_sysent_write, "I", "Attempt sysent[0] write");
3384
3385#endif
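
/*
 * Illustrative sketch only, kept out of the build with #if 0 (not part of
 * this file): writing any nonzero value to kern.sysent_const_check asks the
 * handler above to store through a pointer into sysent[]; on a correctly
 * protected kernel this is expected to take a fatal exception rather than
 * print the "write succeeded" message. DEVELOPMENT/DEBUG kernels only;
 * expect a panic.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t value = 1; /* any nonzero write triggers the check */

        if (sysctlbyname("kern.sysent_const_check", NULL, NULL, &value, sizeof(value)) != 0)
                perror("kern.sysent_const_check");

        return 0;
}
#endif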
3386
3387#if DEVELOPMENT || DEBUG
3388SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 1, "");
3389#else
3390SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 0, "");
3391#endif
3392
3393
3394#if DEVELOPMENT || DEBUG
3395
3396static int
3397sysctl_panic_test SYSCTL_HANDLER_ARGS
3398{
3399#pragma unused(arg1, arg2)
3400 int rval = 0;
3401 char str[32] = "entry prelog postlog postcore";
3402
3403 rval = sysctl_handle_string(oidp, str, sizeof(str), req);
3404
3405 if (rval == 0 && req->newptr) {
3406 if (strncmp("entry", str, strlen("entry")) == 0) {
3407 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_ENTRY, "test recursive panic at entry");
3408 } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
3409 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_PRELOG, "test recursive panic prior to writing a paniclog");
3410 } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
3411 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTLOG, "test recursive panic subsequent to paniclog");
3412 } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
3413 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTCORE, "test recursive panic subsequent to on-device core");
3414 }
3415 }
3416
3417 return rval;
3418}
3419
3420static int
3421sysctl_debugger_test SYSCTL_HANDLER_ARGS
3422{
3423#pragma unused(arg1, arg2)
3424 int rval = 0;
3425 char str[32] = "entry prelog postlog postcore";
3426
3427 rval = sysctl_handle_string(oidp, str, sizeof(str), req);
3428
3429 if (rval == 0 && req->newptr) {
3430 if (strncmp("entry", str, strlen("entry")) == 0) {
3431 DebuggerWithContext(0, NULL, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY);
3432 } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
3433 DebuggerWithContext(0, NULL, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG);
3434 } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
3435 DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG);
3436 } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
3437 DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE);
3438 }
3439 }
3440
3441 return rval;
3442}
3443
3444decl_lck_spin_data(, spinlock_panic_test_lock)
3445
3446__attribute__((noreturn))
3447static void
3448spinlock_panic_test_acquire_spinlock(void * arg __unused, wait_result_t wres __unused)
3449{
3450 lck_spin_lock(&spinlock_panic_test_lock);
3451 while (1) { ; }
3452}
3453
3454static int
3455sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS
3456{
3457#pragma unused(oidp, arg1, arg2)
3458 if (req->newlen == 0)
3459 return EINVAL;
3460
3461 thread_t panic_spinlock_thread;
3462 /* Initialize panic spinlock */
3463 lck_grp_t * panic_spinlock_grp;
3464 lck_grp_attr_t * panic_spinlock_grp_attr;
3465 lck_attr_t * panic_spinlock_attr;
3466
3467 panic_spinlock_grp_attr = lck_grp_attr_alloc_init();
3468 panic_spinlock_grp = lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr);
3469 panic_spinlock_attr = lck_attr_alloc_init();
3470
3471 lck_spin_init(&spinlock_panic_test_lock, panic_spinlock_grp, panic_spinlock_attr);
3472
3473
3474 /* Create thread to acquire spinlock */
3475 if (kernel_thread_start(spinlock_panic_test_acquire_spinlock, NULL, &panic_spinlock_thread) != KERN_SUCCESS) {
3476 return EBUSY;
3477 }
3478
3479 /* Try to acquire spinlock -- should panic eventually */
3480 lck_spin_lock(&spinlock_panic_test_lock);
3481 while(1) { ; }
3482}
3483
3484__attribute__((noreturn))
3485static void
3486simultaneous_panic_worker
3487(void * arg, wait_result_t wres __unused)
3488{
3489 atomic_int *start_panic = (atomic_int *)arg;
3490
3491 while (!atomic_load(start_panic)) { ; }
3492 panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number());
3493 __builtin_unreachable();
3494}
3495
3496static int
3497sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS
3498{
3499#pragma unused(oidp, arg1, arg2)
3500 if (req->newlen == 0)
3501 return EINVAL;
3502
3503 int i = 0, threads_to_create = 2 * processor_count;
3504 atomic_int start_panic = 0;
3505 unsigned int threads_created = 0;
3506 thread_t new_panic_thread;
3507
3508 for (i = threads_to_create; i > 0; i--) {
3509 if (kernel_thread_start(simultaneous_panic_worker, (void *) &start_panic, &new_panic_thread) == KERN_SUCCESS) {
3510 threads_created++;
3511 }
3512 }
3513
3514 /* FAIL if we couldn't create at least processor_count threads */
3515 if (threads_created < processor_count) {
3516 panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %d (of %d)",
3517 threads_created, threads_to_create);
3518 }
3519
3520 atomic_exchange(&start_panic, 1);
3521 while (1) { ; }
3522}
3523
3524SYSCTL_PROC(_debug, OID_AUTO, panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_panic_test, "A", "panic test");
3525SYSCTL_PROC(_debug, OID_AUTO, debugger_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_debugger_test, "A", "debugger test");
3526SYSCTL_PROC(_debug, OID_AUTO, spinlock_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_spinlock_panic_test, "A", "spinlock panic test");
3527SYSCTL_PROC(_debug, OID_AUTO, simultaneous_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_simultaneous_panic_test, "A", "simultaneous panic test");
3528
3529
3530#endif /* DEVELOPMENT || DEBUG */
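
/*
 * Illustrative sketch only, kept out of the build with #if 0 (not part of
 * this file): reading debug.panic_test returns the keyword list
 * ("entry prelog postlog postcore") from the handler above; writing one of
 * those keywords from a privileged caller deliberately triggers the
 * corresponding recursive-panic test. Only the read is shown; these OIDs
 * exist on DEVELOPMENT/DEBUG kernels only.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        char keywords[32] = "";
        size_t len = sizeof(keywords);

        if (sysctlbyname("debug.panic_test", keywords, &len, NULL, 0) == 0)
                printf("panic_test keywords: %s\n", keywords);
        else
                perror("debug.panic_test");

        return 0;
}
#endif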
3531
3532const uint32_t thread_groups_supported = 0;
3533
3534STATIC int
3535sysctl_thread_groups_supported (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3536{
3537 int value = thread_groups_supported;
3538 return sysctl_io_number(req, value, sizeof(value), NULL, NULL);
3539}
3540
3541SYSCTL_PROC(_kern, OID_AUTO, thread_groups_supported, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_KERN,
3542 0, 0, &sysctl_thread_groups_supported, "I", "thread groups supported");
3543
3544static int
3545sysctl_grade_cputype SYSCTL_HANDLER_ARGS
3546{
3547#pragma unused(arg1, arg2, oidp)
3548 int error = 0;
3549 int type_tuple[2] = {};
3550 int return_value = 0;
3551
3552 error = SYSCTL_IN(req, &type_tuple, sizeof(type_tuple));
3553
3554 if (error) {
3555 return error;
3556 }
3557
3558 return_value = grade_binary(type_tuple[0], type_tuple[1]);
3559
3560 error = SYSCTL_OUT(req, &return_value, sizeof(return_value));
3561
3562 if (error) {
3563 return error;
3564 }
3565
3566 return error;
3567}
3568
3569SYSCTL_PROC(_kern, OID_AUTO, grade_cputype,
3570 CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED|CTLTYPE_OPAQUE,
3571 0, 0, &sysctl_grade_cputype, "S",
3572 "grade value of cpu_type_t+cpu_sub_type_t");