1c79356b 1/*
cb323159 2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
5d5c5d0d 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
66/*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
72
73/*
0a7de745 74 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
84 */
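/*
 * For illustration only, a minimal sketch of the preferred SYSCTL_* style
 * referred to above. The names "example_tunable" and "sysctl_example_tunable"
 * are placeholders, not part of this file; real definitions belong next to
 * the code they control and use whichever CTLFLAG_* bits apply.
 */
#if 0
static int example_tunable = 0;

/* Simple integer-backed sysctl: no handler needed. */
SYSCTL_INT(_kern, OID_AUTO, example_tunable,
    CTLFLAG_RW | CTLFLAG_LOCKED, &example_tunable, 0, "example knob");

/* Handler-backed sysctl using the new-style sysctl_io_number() helper. */
static int
sysctl_example_tunable SYSCTL_HANDLER_ARGS
{
	int value = example_tunable, changed, error;

	error = sysctl_io_number(req, value, sizeof(value), &value, &changed);
	if (error == 0 && changed) {
		example_tunable = value;
	}
	return error;
}
SYSCTL_PROC(_kern, OID_AUTO, example_tunable_checked,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
    sysctl_example_tunable, "I", "example handler-backed knob");
#endif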
85
86#include <sys/param.h>
87#include <sys/systm.h>
88#include <sys/kernel.h>
89#include <sys/malloc.h>
90#include <sys/proc_internal.h>
91#include <sys/kauth.h>
92#include <sys/file_internal.h>
93#include <sys/vnode_internal.h>
94#include <sys/unistd.h>
95#include <sys/buf.h>
96#include <sys/ioctl.h>
55e303ae 97#include <sys/namei.h>
98#include <sys/tty.h>
99#include <sys/disklabel.h>
100#include <sys/vm.h>
101#include <sys/sysctl.h>
9bccf70c 102#include <sys/user.h>
55e303ae 103#include <sys/aio_kern.h>
2d21ac55 104#include <sys/reboot.h>
105#include <sys/memory_maintenance.h>
106#include <sys/priv.h>
5ba3f43e 107#include <stdatomic.h>
c3c9b80d 108#include <uuid/uuid.h>
e5568f75 109
110#include <security/audit/audit.h>
111#include <kern/kalloc.h>
e5568f75 112
5ba3f43e 113#include <machine/smp.h>
0a7de745 114#include <machine/atomic.h>
f427ee49 115#include <machine/config.h>
1c79356b 116#include <mach/machine.h>
6d2010ae 117#include <mach/mach_host.h>
1c79356b 118#include <mach/mach_types.h>
5ba3f43e 119#include <mach/processor_info.h>
1c79356b 120#include <mach/vm_param.h>
5ba3f43e 121#include <kern/debug.h>
b0d623f7 122#include <kern/mach_param.h>
1c79356b 123#include <kern/task.h>
316670eb 124#include <kern/thread.h>
5ba3f43e 125#include <kern/thread_group.h>
6d2010ae 126#include <kern/processor.h>
5ba3f43e 127#include <kern/cpu_number.h>
d9a64523 128#include <kern/cpu_quiesce.h>
3e170ce0 129#include <kern/sched_prim.h>
1c79356b 130#include <vm/vm_kern.h>
91447636 131#include <vm/vm_map.h>
132#include <mach/host_info.h>
133
91447636 134#include <sys/mount_internal.h>
1c79356b 135#include <sys/kdebug.h>
cb323159 136#include <sys/kern_sysctl.h>
137
138#include <IOKit/IOPlatformExpert.h>
139#include <pexpert/pexpert.h>
140
55e303ae 141#include <machine/machine_routines.h>
0c530ab8 142#include <machine/exec.h>
1c79356b 143
144#include <nfs/nfs_conf.h>
145
91447636 146#include <vm/vm_protos.h>
39236c6e 147#include <vm/vm_pageout.h>
39037602 148#include <vm/vm_compressor_algorithms.h>
6d2010ae 149#include <sys/imgsrc.h>
fe8ab488 150#include <kern/timer_call.h>
151#include <sys/codesign.h>
152#include <IOKit/IOBSD.h>
153#if CONFIG_CSR
154#include <sys/csr.h>
155#endif
91447636 156
b0d623f7 157#if defined(__i386__) || defined(__x86_64__)
158#include <i386/cpuid.h>
159#endif
160
161#if CONFIG_FREEZE
162#include <sys/kern_memorystatus.h>
163#endif
164
165#if KPERF
166#include <kperf/kperf.h>
167#endif
168
169#if HYPERVISOR
170#include <kern/hv_support.h>
171#endif
172
173/*
174 * deliberately setting max requests to really high number
175 * so that runaway settings do not cause MALLOC overflows
176 */
177#define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
178
179extern int aio_max_requests;
180extern int aio_max_requests_per_process;
181extern int aio_worker_threads;
182extern int lowpri_IO_window_msecs;
183extern int lowpri_IO_delay_msecs;
0a7de745 184#if DEVELOPMENT || DEBUG
0c530ab8 185extern int nx_enabled;
0a7de745 186#endif
2d21ac55 187extern int speculative_reads_disabled;
6d2010ae 188extern unsigned int speculative_prefetch_max;
316670eb 189extern unsigned int speculative_prefetch_max_iosize;
190extern unsigned int preheat_max_bytes;
191extern unsigned int preheat_min_bytes;
b0d623f7 192extern long numvnodes;
c3c9b80d 193extern long freevnodes;
0a7de745 194extern long num_recycledvnodes;
1c79356b 195
196extern uuid_string_t bootsessionuuid_string;
197
198extern unsigned int vm_max_delayed_work_limit;
199extern unsigned int vm_max_batch;
200
201extern unsigned int vm_page_free_min;
202extern unsigned int vm_page_free_target;
203extern unsigned int vm_page_free_reserved;
6d2010ae 204
04b8595b 205#if (DEVELOPMENT || DEBUG)
206extern uint32_t vm_page_creation_throttled_hard;
207extern uint32_t vm_page_creation_throttled_soft;
208#endif /* DEVELOPMENT || DEBUG */
209
210#if CONFIG_LOCKERBOOT
211extern const char kernel_protoboot_mount[];
212#endif
213
214/*
215 * Conditionally allow dtrace to see these functions for debugging purposes.
216 */
217#ifdef STATIC
218#undef STATIC
219#endif
220#if 0
221#define STATIC
222#else
223#define STATIC static
224#endif
225
226extern boolean_t mach_timer_coalescing_enabled;
227
228extern uint64_t timer_deadline_tracking_bin_1, timer_deadline_tracking_bin_2;
229
6d2010ae 230STATIC void
231fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
232STATIC void
233fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
6d2010ae 234STATIC void
316670eb 235fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
6d2010ae 236STATIC void
316670eb 237fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
6d2010ae 238STATIC void
316670eb 239fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
6d2010ae 240STATIC void
241fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);
242
0a7de745 243extern int
91447636 244kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
ea3f0419 245#if CONFIG_NFS_CLIENT
0a7de745 246extern int
247netboot_root(void);
248#endif
249int
250sysctl_procargs(int *name, u_int namelen, user_addr_t where,
251 size_t *sizep, proc_t cur_proc);
6d2010ae 252STATIC int
253sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
254 proc_t cur_proc, int argc_yes);
91447636 255int
256sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
257 size_t newlen, void *sp, int len);
1c79356b 258
259STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
260STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
261STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
262STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
263STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
2d21ac55 264int sysdoproc_callback(proc_t p, void *arg);
1c79356b 265
266#if CONFIG_THREAD_GROUPS && (DEVELOPMENT || DEBUG)
267STATIC int sysctl_get_thread_group_id SYSCTL_HANDLER_ARGS;
268#endif
269
270/* forward declarations for non-static STATIC */
271STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
272STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
273STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
274STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
275STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
6d2010ae 276STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
277#if COUNT_SYSCALLS
278STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
0a7de745 279#endif /* COUNT_SYSCALLS */
f427ee49 280#if defined(XNU_TARGET_OS_OSX)
6d2010ae 281STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
f427ee49 282#endif /* defined(XNU_TARGET_OS_OSX) */
283STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
284STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
285STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
286STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
287STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
288STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
289STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
290STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
291STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
292STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
293STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
294STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
295STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
296STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
f427ee49 297STATIC int sysctl_bootuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
6d2010ae 298STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
ea3f0419 299#if CONFIG_NFS_CLIENT
300STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
301#endif
302#ifdef CONFIG_IMGSRC_ACCESS
303STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
304#endif
305STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
306STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
39037602 307#if CONFIG_COREDUMP
308STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
309STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
39037602 310#endif
311STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
312STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
313STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
0a7de745 314#if DEVELOPMENT || DEBUG
6d2010ae 315STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
0a7de745 316#endif
317STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
318STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
319STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
320STATIC int fetch_process_cputype( proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
321STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
322STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
323STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
324STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
3e170ce0 325STATIC int sysctl_minimalboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
316670eb 326STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
6d2010ae 327
328#ifdef CONFIG_XNUPOST
329#include <tests/xnupost.h>
330
331STATIC int sysctl_debug_test_oslog_ctl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
332STATIC int sysctl_debug_test_stackshot_mutex_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
333STATIC int sysctl_debug_test_stackshot_rwlck_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
334#endif
2d21ac55 335
0a7de745 336extern void IORegistrySetOSBuildVersion(char * build_version);
91447636 337
6d2010ae 338STATIC void
b0d623f7 339fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
91447636 340{
341 la64->ldavg[0] = la->ldavg[0];
342 la64->ldavg[1] = la->ldavg[1];
343 la64->ldavg[2] = la->ldavg[2];
344 la64->fscale = (user64_long_t)la->fscale;
345}
346
6d2010ae 347STATIC void
348fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
349{
350 la32->ldavg[0] = la->ldavg[0];
351 la32->ldavg[1] = la->ldavg[1];
352 la32->ldavg[2] = la->ldavg[2];
353 la32->fscale = (user32_long_t)la->fscale;
354}
355
39037602 356#if CONFIG_COREDUMP
357/*
358 * Attributes stored in the kernel.
359 */
0a7de745 360extern char corefilename[MAXPATHLEN + 1];
361extern int do_coredump;
362extern int sugid_coredump;
39037602 363#endif
e5568f75 364
2d21ac55 365#if COUNT_SYSCALLS
39236c6e 366extern int do_count_syscalls;
2d21ac55 367#endif
55e303ae 368
369#ifdef INSECURE
370int securelevel = -1;
371#else
372int securelevel;
373#endif
374
6d2010ae 375STATIC int
376sysctl_handle_kern_threadname( __unused struct sysctl_oid *oidp, __unused void *arg1,
377 __unused int arg2, struct sysctl_req *req)
378{
379 int error;
380 struct uthread *ut = get_bsdthread_info(current_thread());
381 user_addr_t oldp = 0, newp = 0;
382 size_t *oldlenp = NULL;
383 size_t newlen = 0;
384
385 oldp = req->oldptr;
386 oldlenp = &(req->oldlen);
387 newp = req->newptr;
388 newlen = req->newlen;
389
390 /* We want the current length, and maybe the string itself */
0a7de745 391 if (oldlenp) {
392 /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
393 size_t currlen = MAXTHREADNAMESIZE - 1;
394
395 if (ut->pth_name) {
396 /* use length of current thread name */
397 currlen = strlen(ut->pth_name);
398 }
399 if (oldp) {
400 if (*oldlenp < currlen) {
b0d623f7 401 return ENOMEM;
0a7de745 402 }
b0d623f7 403 /* NOTE - we do not copy the NULL terminator */
404 if (ut->pth_name) {
405 error = copyout(ut->pth_name, oldp, currlen);
406 if (error) {
b0d623f7 407 return error;
0a7de745 408 }
b0d623f7 409 }
0a7de745 410 }
411 /* return length of thread name minus NULL terminator (just like strlen) */
412 req->oldidx = currlen;
413 }
414
415 /* We want to set the name to something */
416 if (newp) {
417 if (newlen > (MAXTHREADNAMESIZE - 1)) {
b0d623f7 418 return ENAMETOOLONG;
419 }
420 if (!ut->pth_name) {
421 char *tmp_pth_name = (char *)kalloc(MAXTHREADNAMESIZE);
422 if (!tmp_pth_name) {
b0d623f7 423 return ENOMEM;
0a7de745 424 }
425 bzero(tmp_pth_name, MAXTHREADNAMESIZE);
426 if (!OSCompareAndSwapPtr(NULL, tmp_pth_name, &ut->pth_name)) {
427 kfree(tmp_pth_name, MAXTHREADNAMESIZE);
428 return EBUSY;
429 }
430 } else {
431 kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, ut->pth_name);
cb323159 432 bzero(ut->pth_name, MAXTHREADNAMESIZE);
b0d623f7 433 }
b0d623f7 434 error = copyin(newp, ut->pth_name, newlen);
39037602 435 if (error) {
b0d623f7 436 return error;
437 }
438
439 kernel_debug_string_simple(TRACE_STRING_THREADNAME, ut->pth_name);
b0d623f7 440 }
0a7de745 441
442 return 0;
443}
444
0a7de745 445SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname, "A", "");
b0d623f7 446
447#define BSD_HOST 1
448STATIC int
449sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
450{
451 host_basic_info_data_t hinfo;
452 kern_return_t kret;
453 uint32_t size;
c3c9b80d 454 uint32_t buf_size = 0;
455 int changed;
456 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
457 struct _processor_statistics_np *buf;
458 int error;
55e303ae 459
460 kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
461 if (kret != KERN_SUCCESS) {
462 return EINVAL;
463 }
1c79356b 464
6d2010ae 465 size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */
0a7de745 466
467 if (req->oldlen < size) {
468 return EINVAL;
469 }
470
471 buf_size = size;
472 buf = kheap_alloc(KHEAP_TEMP, buf_size, Z_ZERO | Z_WAITOK);
0a7de745 473
474 kret = get_sched_statistics(buf, &size);
475 if (kret != KERN_SUCCESS) {
476 error = EINVAL;
477 goto out;
478 }
479
480 error = sysctl_io_opaque(req, buf, size, &changed);
481 if (error) {
482 goto out;
483 }
484
485 if (changed) {
486 panic("Sched info changed?!");
487 }
488out:
c3c9b80d 489 kheap_free(KHEAP_TEMP, buf, buf_size);
490 return error;
491}
492
493SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");
494
495STATIC int
496sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
497{
498 boolean_t active;
499 int res;
500
501 if (req->newlen != sizeof(active)) {
502 return EINVAL;
503 }
504
505 res = copyin(req->newptr, &active, sizeof(active));
506 if (res != 0) {
507 return res;
508 }
509
510 return set_sched_stats_active(active);
511}
512
513SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
514
515extern uint32_t sched_debug_flags;
516SYSCTL_INT(_debug, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_debug_flags, 0, "scheduler debug");
517
518#if (DEBUG || DEVELOPMENT)
519extern boolean_t doprnt_hide_pointers;
520SYSCTL_INT(_debug, OID_AUTO, hide_kernel_pointers, CTLFLAG_RW | CTLFLAG_LOCKED, &doprnt_hide_pointers, 0, "hide kernel pointers from log");
521#endif
522
6d2010ae 523extern int get_kernel_symfile(proc_t, char **);
1c79356b 524
2d21ac55 525#if COUNT_SYSCALLS
526#define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
527
cb323159 528extern const unsigned int nsysent;
529extern int syscalls_log[];
530extern const char *syscallnames[];
531
532STATIC int
533sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
534{
535 __unused int cmd = oidp->oid_arg2; /* subcommand*/
536 __unused int *name = arg1; /* oid element argument vector */
537 __unused int namelen = arg2; /* number of oid element arguments */
c3c9b80d 538 int error, changed;
539
540 int tmp;
541
542 /* valid values passed in:
543 * = 0 means don't keep called counts for each bsd syscall
544 * > 0 means keep called counts for each bsd syscall
545 * = 2 means dump current counts to the system log
546 * = 3 means reset all counts
0a7de745 547 * for example, to dump current counts:
548 * sysctl -w kern.count_calls=2
549 */
550 error = sysctl_io_number(req, do_count_syscalls,
551 sizeof(do_count_syscalls), &tmp, &changed);
552
553 if (error != 0 || !changed) {
0a7de745 554 return error;
6d2010ae 555 }
556
557 if (tmp == 1) {
6d2010ae 558 do_count_syscalls = 1;
0a7de745 559 } else if (tmp == 0 || tmp == 2 || tmp == 3) {
c3c9b80d 560 for (int i = 0; i < nsysent; i++) {
561 if (syscalls_log[i] != 0) {
562 if (tmp == 2) {
6d2010ae 563 printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
0a7de745 564 } else {
6d2010ae 565 syscalls_log[i] = 0;
2d21ac55 566 }
567 }
568 }
c3c9b80d 569 do_count_syscalls = (tmp != 0);
0a7de745 570 }
6d2010ae 571
0a7de745 572 return error;
1c79356b 573}
574SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
575 0, /* Pointer argument (arg1) */
576 0, /* Integer argument (arg2) */
577 sysctl_docountsyscalls, /* Handler function */
578 NULL, /* Data pointer */
579 "");
580#endif /* COUNT_SYSCALLS */
1c79356b 581
582/*
583 * The following sysctl_* functions should not be used
584 * any more, as they can only cope with callers in
585 * user mode: Use new-style
586 * sysctl_io_number()
587 * sysctl_io_string()
588 * sysctl_io_opaque()
589 * instead.
590 */
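/*
 * For reference, a minimal sketch of the string-valued helper named above,
 * built around a hypothetical knob "example_label" (not part of this file).
 * sysctl_io_string() performs the copyin/copyout and length checks that
 * old-style handlers had to open-code.
 */
#if 0
static char example_label[32] = "default";

static int
sysctl_example_label SYSCTL_HANDLER_ARGS
{
	int changed, error;

	error = sysctl_io_string(req, example_label, sizeof(example_label), 0, &changed);
	if (error == 0 && changed) {
		/* react to the new value here */
	}
	return error;
}
SYSCTL_PROC(_kern, OID_AUTO, example_label,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
    sysctl_example_label, "A", "example string knob");
#endif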
591
6d2010ae 592STATIC int
593sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
594{
595 if (p->p_pid != (pid_t)*(int*)arg) {
596 return 0;
597 } else {
598 return 1;
599 }
2d21ac55
A
600}
601
6d2010ae 602STATIC int
603sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
604{
605 if (p->p_pgrpid != (pid_t)*(int*)arg) {
606 return 0;
607 } else {
608 return 1;
609 }
610}
611
6d2010ae 612STATIC int
613sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
614{
2d21ac55 615 int retval;
b0d623f7 616 struct tty *tp;
2d21ac55 617
618 /* This is very racy but list lock is held.. Hmmm. */
619 if ((p->p_flag & P_CONTROLT) == 0 ||
620 (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
621 (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
622 tp->t_dev != (dev_t)*(int*)arg) {
623 retval = 0;
624 } else {
2d21ac55 625 retval = 1;
0a7de745 626 }
2d21ac55 627
0a7de745 628 return retval;
629}
630
6d2010ae 631STATIC int
632sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
633{
634 kauth_cred_t my_cred;
635 uid_t uid;
636
637 if (p->p_ucred == NULL) {
638 return 0;
639 }
640 my_cred = kauth_cred_proc_ref(p);
641 uid = kauth_cred_getuid(my_cred);
642 kauth_cred_unref(&my_cred);
643
644 if (uid != (uid_t)*(int*)arg) {
645 return 0;
646 } else {
647 return 1;
648 }
649}
650
651
6d2010ae 652STATIC int
653sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
654{
655 kauth_cred_t my_cred;
656 uid_t ruid;
657
658 if (p->p_ucred == NULL) {
659 return 0;
660 }
2d21ac55 661 my_cred = kauth_cred_proc_ref(p);
6d2010ae 662 ruid = kauth_cred_getruid(my_cred);
663 kauth_cred_unref(&my_cred);
664
665 if (ruid != (uid_t)*(int*)arg) {
666 return 0;
667 } else {
668 return 1;
669 }
670}
671
672/*
673 * try over estimating by 5 procs
674 */
f427ee49 675#define KERN_PROCSLOP (5 * sizeof(struct kinfo_proc))
2d21ac55 676struct sysdoproc_args {
677 size_t buflen;
678 void *kprocp;
2d21ac55 679 boolean_t is_64_bit;
f427ee49 680 user_addr_t dp;
2d21ac55 681 size_t needed;
f427ee49 682 unsigned int sizeof_kproc;
316670eb 683 int *errorp;
684 int uidcheck;
685 int ruidcheck;
686 int ttycheck;
687 int uidval;
688};
689
690int
316670eb 691sysdoproc_callback(proc_t p, void *arg)
2d21ac55 692{
316670eb 693 struct sysdoproc_args *args = arg;
694
695 if (args->buflen >= args->sizeof_kproc) {
696 if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0)) {
697 return PROC_RETURNED;
698 }
699 if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0)) {
700 return PROC_RETURNED;
701 }
702 if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0)) {
703 return PROC_RETURNED;
704 }
705
706 bzero(args->kprocp, args->sizeof_kproc);
0a7de745 707 if (args->is_64_bit) {
316670eb 708 fill_user64_proc(p, args->kprocp);
0a7de745 709 } else {
316670eb 710 fill_user32_proc(p, args->kprocp);
0a7de745 711 }
316670eb 712 int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
713 if (error) {
714 *args->errorp = error;
0a7de745 715 return PROC_RETURNED_DONE;
716 }
717 args->dp += args->sizeof_kproc;
718 args->buflen -= args->sizeof_kproc;
719 }
720 args->needed += args->sizeof_kproc;
0a7de745 721 return PROC_RETURNED;
2d21ac55 722}
1c79356b 723
724SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
725STATIC int
726sysctl_prochandle SYSCTL_HANDLER_ARGS
1c79356b 727{
728 int cmd = oidp->oid_arg2; /* subcommand for multiple nodes */
729 int *name = arg1; /* oid element argument vector */
730 int namelen = arg2; /* number of oid element arguments */
731 user_addr_t where = req->oldptr;/* user buffer copy out address */
732
733 user_addr_t dp = where;
734 size_t needed = 0;
f427ee49 735 size_t buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
1c79356b 736 int error = 0;
316670eb 737 boolean_t is_64_bit = proc_is64bit(current_proc());
738 struct user32_kinfo_proc user32_kproc;
739 struct user64_kinfo_proc user_kproc;
91447636 740 int sizeof_kproc;
316670eb 741 void *kprocp;
742 int (*filterfn)(proc_t, void *) = 0;
743 struct sysdoproc_args args;
744 int uidcheck = 0;
745 int ruidcheck = 0;
746 int ttycheck = 0;
1c79356b 747
748 if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL)) {
749 return EINVAL;
750 }
6d2010ae 751
752 if (is_64_bit) {
753 sizeof_kproc = sizeof(user_kproc);
754 kprocp = &user_kproc;
755 } else {
b0d623f7 756 sizeof_kproc = sizeof(user32_kproc);
316670eb 757 kprocp = &user32_kproc;
91447636 758 }
2d21ac55 759
6d2010ae 760 switch (cmd) {
761 case KERN_PROC_PID:
762 filterfn = sysdoproc_filt_KERN_PROC_PID;
763 break;
1c79356b 764
765 case KERN_PROC_PGRP:
766 filterfn = sysdoproc_filt_KERN_PROC_PGRP;
767 break;
1c79356b 768
769 case KERN_PROC_TTY:
770 ttycheck = 1;
771 break;
1c79356b 772
773 case KERN_PROC_UID:
774 uidcheck = 1;
775 break;
1c79356b 776
777 case KERN_PROC_RUID:
778 ruidcheck = 1;
779 break;
2d21ac55 780
781 case KERN_PROC_ALL:
782 break;
6d2010ae 783
784 default:
785 /* must be kern.proc.<unknown> */
786 return ENOTSUP;
1c79356b 787 }
788
789 error = 0;
790 args.buflen = buflen;
791 args.kprocp = kprocp;
792 args.is_64_bit = is_64_bit;
793 args.dp = dp;
794 args.needed = needed;
795 args.errorp = &error;
796 args.uidcheck = uidcheck;
797 args.ruidcheck = ruidcheck;
798 args.ttycheck = ttycheck;
799 args.sizeof_kproc = sizeof_kproc;
0a7de745 800 if (namelen) {
316670eb 801 args.uidval = name[0];
0a7de745 802 }
2d21ac55 803
cb323159 804 proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
0a7de745 805 sysdoproc_callback, &args, filterfn, name);
5ba3f43e 806
807 if (error) {
808 return error;
809 }
810
811 dp = args.dp;
812 needed = args.needed;
0a7de745 813
91447636 814 if (where != USER_ADDR_NULL) {
6d2010ae 815 req->oldlen = dp - where;
816 if (needed > req->oldlen) {
817 return ENOMEM;
818 }
819 } else {
820 needed += KERN_PROCSLOP;
6d2010ae 821 req->oldlen = needed;
1c79356b 822 }
823 /* adjust index so we return the right required/consumed amount */
824 req->oldidx += req->oldlen;
0a7de745 825 return 0;
1c79356b 826}
316670eb 827
f427ee49 828
829/*
830 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
831 * in the sysctl declaration itself, which comes into the handler function
832 * as 'oidp->oid_arg2'.
833 *
834 * For these particular sysctls, since they have well known OIDs, we could
835 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
836 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
837 * of a well known value with a common handler function. This is desirable,
838 * because we want well known values to "go away" at some future date.
839 *
840 * It should be noted that the value of '((int *)arg1)[1]' is used for many
841 * an integer parameter to the subcommand for many of these sysctls; we'd
842 * rather have used '((int *)arg1)[0]' for that, or even better, an element
 843 * in a structure passed in as the 'newp' argument to sysctlbyname(3),
844 * and then use leaf-node permissions enforcement, but that would have
845 * necessitated modifying user space code to correspond to the interface
846 * change, and we are striving for binary backward compatibility here; even
847 * though these are SPI, and not intended for use by user space applications
848 * which are not themselves system tools or libraries, some applications
849 * have erroneously used them.
850 */
851SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
852 0, /* Pointer argument (arg1) */
853 KERN_PROC_ALL, /* Integer argument (arg2) */
854 sysctl_prochandle, /* Handler function */
855 NULL, /* Data is size variant on ILP32/LP64 */
856 "");
857SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
858 0, /* Pointer argument (arg1) */
859 KERN_PROC_PID, /* Integer argument (arg2) */
860 sysctl_prochandle, /* Handler function */
861 NULL, /* Data is size variant on ILP32/LP64 */
862 "");
863SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
864 0, /* Pointer argument (arg1) */
865 KERN_PROC_TTY, /* Integer argument (arg2) */
866 sysctl_prochandle, /* Handler function */
867 NULL, /* Data is size variant on ILP32/LP64 */
868 "");
869SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
870 0, /* Pointer argument (arg1) */
871 KERN_PROC_PGRP, /* Integer argument (arg2) */
872 sysctl_prochandle, /* Handler function */
873 NULL, /* Data is size variant on ILP32/LP64 */
874 "");
875SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
876 0, /* Pointer argument (arg1) */
877 KERN_PROC_UID, /* Integer argument (arg2) */
878 sysctl_prochandle, /* Handler function */
879 NULL, /* Data is size variant on ILP32/LP64 */
880 "");
881SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
882 0, /* Pointer argument (arg1) */
883 KERN_PROC_RUID, /* Integer argument (arg2) */
884 sysctl_prochandle, /* Handler function */
885 NULL, /* Data is size variant on ILP32/LP64 */
886 "");
887SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
888 0, /* Pointer argument (arg1) */
889 KERN_PROC_LCID, /* Integer argument (arg2) */
890 sysctl_prochandle, /* Handler function */
891 NULL, /* Data is size variant on ILP32/LP64 */
892 "");
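/*
 * These nodes keep their historical, well-known MIB names, so user space can
 * still reach them with a static MIB vector. A minimal sketch for the
 * KERN_PROC_PID subcommand (the returned layout is a struct kinfo_proc sized
 * for the caller's ABI); error handling reduced to the essentials.
 */
#if 0
#include <sys/sysctl.h>
#include <unistd.h>

static void
kinfo_proc_example(void)
{
	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
	struct kinfo_proc kp;
	size_t len = sizeof(kp);

	if (sysctl(mib, 4, &kp, &len, NULL, 0) == 0 && len > 0) {
		/* kp.kp_proc.p_comm holds the short command name */
	}
}
#endif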
6d2010ae 893
1c79356b 894
1c79356b 895/*
316670eb 896 * Fill in non-zero fields of an eproc structure for the specified process.
1c79356b 897 */
6d2010ae 898STATIC void
316670eb 899fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
1c79356b 900{
2d21ac55 901 struct tty *tp;
902 struct pgrp *pg;
903 struct session *sessp;
2d21ac55 904 kauth_cred_t my_cred;
905
906 pg = proc_pgrp(p);
907 sessp = proc_session(p);
1c79356b 908
2d21ac55 909 if (pg != PGRP_NULL) {
910 ep->e_pgid = p->p_pgrpid;
911 ep->e_jobc = pg->pg_jobc;
0a7de745 912 if (sessp != SESSION_NULL && sessp->s_ttyvp) {
55e303ae 913 ep->e_flag = EPROC_CTTY;
0a7de745 914 }
55e303ae 915 }
2d21ac55 916 ep->e_ppid = p->p_ppid;
91447636 917 if (p->p_ucred) {
2d21ac55 918 my_cred = kauth_cred_proc_ref(p);
919
920 /* A fake historical pcred */
921 ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
922 ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
923 ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
924 ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);
316670eb 925
91447636 926 /* A fake historical *kauth_cred_t */
927 unsigned long refcnt = os_atomic_load(&my_cred->cr_ref, relaxed);
928 ep->e_ucred.cr_ref = (uint32_t)MIN(refcnt, UINT32_MAX);
2d21ac55 929 ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
f427ee49 930 ep->e_ucred.cr_ngroups = (short)posix_cred_get(my_cred)->cr_ngroups;
316670eb 931 bcopy(posix_cred_get(my_cred)->cr_groups,
0a7de745 932 ep->e_ucred.cr_groups, NGROUPS * sizeof(gid_t));
91447636 933
2d21ac55 934 kauth_cred_unref(&my_cred);
55e303ae 935 }
55e303ae 936
2d21ac55 937 if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
0a7de745 938 (tp = SESSION_TP(sessp))) {
1c79356b 939 ep->e_tdev = tp->t_dev;
2d21ac55 940 ep->e_tpgid = sessp->s_ttypgrpid;
0a7de745 941 } else {
1c79356b 942 ep->e_tdev = NODEV;
0a7de745 943 }
55e303ae 944
316670eb 945 if (sessp != SESSION_NULL) {
0a7de745 946 if (SESS_LEADER(p, sessp)) {
316670eb 947 ep->e_flag |= EPROC_SLEADER;
0a7de745 948 }
2d21ac55 949 session_rele(sessp);
316670eb 950 }
0a7de745 951 if (pg != PGRP_NULL) {
2d21ac55 952 pg_rele(pg);
0a7de745 953 }
1c79356b 954}
55e303ae 955
91447636 956/*
316670eb 957 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
91447636 958 */
6d2010ae 959STATIC void
316670eb 960fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
91447636 961{
2d21ac55 962 struct tty *tp;
963 struct pgrp *pg;
964 struct session *sessp;
2d21ac55 965 kauth_cred_t my_cred;
0a7de745 966
967 pg = proc_pgrp(p);
968 sessp = proc_session(p);
91447636 969
2d21ac55 970 if (pg != PGRP_NULL) {
971 ep->e_pgid = p->p_pgrpid;
972 ep->e_jobc = pg->pg_jobc;
0a7de745 973 if (sessp != SESSION_NULL && sessp->s_ttyvp) {
316670eb 974 ep->e_flag = EPROC_CTTY;
0a7de745 975 }
91447636 976 }
2d21ac55 977 ep->e_ppid = p->p_ppid;
91447636 978 if (p->p_ucred) {
2d21ac55 979 my_cred = kauth_cred_proc_ref(p);
980
981 /* A fake historical pcred */
982 ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
983 ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
984 ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
985 ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);
986
987 /* A fake historical *kauth_cred_t */
988 unsigned long refcnt = os_atomic_load(&my_cred->cr_ref, relaxed);
989 ep->e_ucred.cr_ref = (uint32_t)MIN(refcnt, UINT32_MAX);
2d21ac55 990 ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
f427ee49 991 ep->e_ucred.cr_ngroups = (short)posix_cred_get(my_cred)->cr_ngroups;
316670eb 992 bcopy(posix_cred_get(my_cred)->cr_groups,
0a7de745 993 ep->e_ucred.cr_groups, NGROUPS * sizeof(gid_t));
91447636 994
2d21ac55 995 kauth_cred_unref(&my_cred);
91447636 996 }
91447636 997
2d21ac55 998 if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
0a7de745 999 (tp = SESSION_TP(sessp))) {
91447636 1000 ep->e_tdev = tp->t_dev;
2d21ac55 1001 ep->e_tpgid = sessp->s_ttypgrpid;
0a7de745 1002 } else {
91447636 1003 ep->e_tdev = NODEV;
0a7de745 1004 }
91447636 1005
316670eb 1006 if (sessp != SESSION_NULL) {
0a7de745 1007 if (SESS_LEADER(p, sessp)) {
316670eb 1008 ep->e_flag |= EPROC_SLEADER;
0a7de745 1009 }
2d21ac55 1010 session_rele(sessp);
316670eb 1011 }
0a7de745 1012 if (pg != PGRP_NULL) {
2d21ac55 1013 pg_rele(pg);
0a7de745 1014 }
1015}
1016
1017/*
1018 * Fill in an eproc structure for the specified process.
316670eb 1019 * bzeroed by our caller, so only set non-zero fields.
1c79356b 1020 */
6d2010ae 1021STATIC void
316670eb 1022fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
1c79356b 1023{
f427ee49 1024 exp->p_starttime.tv_sec = (user32_time_t)p->p_start.tv_sec;
b0d623f7 1025 exp->p_starttime.tv_usec = p->p_start.tv_usec;
316670eb 1026 exp->p_flag = p->p_flag;
0a7de745 1027 if (p->p_lflag & P_LTRACED) {
2d21ac55 1028 exp->p_flag |= P_TRACED;
1029 }
1030 if (p->p_lflag & P_LPPWAIT) {
2d21ac55 1031 exp->p_flag |= P_PPWAIT;
1032 }
1033 if (p->p_lflag & P_LEXIT) {
2d21ac55 1034 exp->p_flag |= P_WEXIT;
0a7de745 1035 }
1036 exp->p_stat = p->p_stat;
1037 exp->p_pid = p->p_pid;
1038 exp->p_oppid = p->p_oppid;
1c79356b 1039 /* Mach related */
1040 exp->p_debugger = p->p_debugger;
1041 exp->sigwait = p->sigwait;
1c79356b 1042 /* scheduling */
2d21ac55 1043#ifdef _PROC_HAS_SCHEDINFO_
1044 exp->p_estcpu = p->p_estcpu;
1045 exp->p_pctcpu = p->p_pctcpu;
1046 exp->p_slptime = p->p_slptime;
2d21ac55 1047#endif
316670eb 1048 exp->p_realtimer.it_interval.tv_sec =
0a7de745 1049 (user32_time_t)p->p_realtimer.it_interval.tv_sec;
316670eb 1050 exp->p_realtimer.it_interval.tv_usec =
0a7de745 1051 (__int32_t)p->p_realtimer.it_interval.tv_usec;
1052
1053 exp->p_realtimer.it_value.tv_sec =
0a7de745 1054 (user32_time_t)p->p_realtimer.it_value.tv_sec;
316670eb 1055 exp->p_realtimer.it_value.tv_usec =
0a7de745 1056 (__int32_t)p->p_realtimer.it_value.tv_usec;
1057
1058 exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
1059 exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;
1060
1061 exp->p_sigignore = p->p_sigignore;
1062 exp->p_sigcatch = p->p_sigcatch;
1063 exp->p_priority = p->p_priority;
1064 exp->p_nice = p->p_nice;
1065 bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
f427ee49 1066 exp->p_xstat = (u_short)MIN(p->p_xstat, USHRT_MAX);
316670eb 1067 exp->p_acflag = p->p_acflag;
1068}
1069
1070/*
1071 * Fill in an LP64 version of extern_proc structure for the specified process.
1072 */
6d2010ae 1073STATIC void
316670eb 1074fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
91447636 1075{
1076 exp->p_starttime.tv_sec = p->p_start.tv_sec;
1077 exp->p_starttime.tv_usec = p->p_start.tv_usec;
316670eb 1078 exp->p_flag = p->p_flag;
0a7de745 1079 if (p->p_lflag & P_LTRACED) {
2d21ac55 1080 exp->p_flag |= P_TRACED;
1081 }
1082 if (p->p_lflag & P_LPPWAIT) {
2d21ac55 1083 exp->p_flag |= P_PPWAIT;
1084 }
1085 if (p->p_lflag & P_LEXIT) {
2d21ac55 1086 exp->p_flag |= P_WEXIT;
0a7de745 1087 }
1088 exp->p_stat = p->p_stat;
1089 exp->p_pid = p->p_pid;
1090 exp->p_oppid = p->p_oppid;
91447636 1091 /* Mach related */
1092 exp->p_debugger = p->p_debugger;
1093 exp->sigwait = p->sigwait;
91447636 1094 /* scheduling */
2d21ac55 1095#ifdef _PROC_HAS_SCHEDINFO_
1096 exp->p_estcpu = p->p_estcpu;
1097 exp->p_pctcpu = p->p_pctcpu;
1098 exp->p_slptime = p->p_slptime;
2d21ac55 1099#endif
1100 exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
1101 exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
316670eb 1102
1103 exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
1104 exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
316670eb 1105
1106 exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
1107 exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
1108
1109 exp->p_sigignore = p->p_sigignore;
1110 exp->p_sigcatch = p->p_sigcatch;
1111 exp->p_priority = p->p_priority;
1112 exp->p_nice = p->p_nice;
1113 bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
f427ee49 1114 exp->p_xstat = (u_short)MIN(p->p_xstat, USHRT_MAX);
316670eb 1115 exp->p_acflag = p->p_acflag;
1116}
1117
6d2010ae 1118STATIC void
316670eb 1119fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
55e303ae 1120{
316670eb 1121 /* on a 64 bit kernel, 32 bit users get some truncated information */
1122 fill_user32_externproc(p, &kp->kp_proc);
1123 fill_user32_eproc(p, &kp->kp_eproc);
1124}
1125
6d2010ae 1126STATIC void
316670eb 1127fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
91447636 1128{
1129 fill_user64_externproc(p, &kp->kp_proc);
1130 fill_user64_eproc(p, &kp->kp_eproc);
1131}
1132
1133STATIC int
1134sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
1c79356b 1135{
1136 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1137 int *name = arg1; /* oid element argument vector */
1138 int namelen = arg2; /* number of oid element arguments */
1139 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1140 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1141// user_addr_t newp = req->newptr; /* user buffer copy in address */
1142// size_t newlen = req->newlen; /* user buffer copy in size */
1143
0a7de745 1144 int ret = 0;
1c79356b 1145
1146 if (namelen == 0) {
1147 return ENOTSUP;
1148 }
39037602 1149
0a7de745 1150 switch (name[0]) {
1151 case KERN_KDEFLAGS:
1152 case KERN_KDDFLAGS:
1153 case KERN_KDENABLE:
1154 case KERN_KDGETBUF:
1155 case KERN_KDSETUP:
1156 case KERN_KDREMOVE:
1157 case KERN_KDSETREG:
1158 case KERN_KDGETREG:
1159 case KERN_KDREADTR:
1160 case KERN_KDWRITETR:
1161 case KERN_KDWRITEMAP:
39037602 1162 case KERN_KDTEST:
1163 case KERN_KDPIDTR:
1164 case KERN_KDTHRMAP:
1165 case KERN_KDPIDEX:
1c79356b 1166 case KERN_KDSETBUF:
39236c6e 1167 case KERN_KDREADCURTHRMAP:
316670eb 1168 case KERN_KDSET_TYPEFILTER:
3e170ce0 1169 case KERN_KDBUFWAIT:
39236c6e 1170 case KERN_KDCPUMAP:
1171 case KERN_KDWRITEMAP_V3:
1172 case KERN_KDWRITETR_V3:
1173 ret = kdbg_control(name, namelen, oldp, oldlenp);
1174 break;
1c79356b 1175 default:
0a7de745 1176 ret = ENOTSUP;
1177 break;
1178 }
1179
1180 /* adjust index so we return the right required/consumed amount */
0a7de745 1181 if (!ret) {
6d2010ae 1182 req->oldidx += req->oldlen;
0a7de745 1183 }
6d2010ae 1184
0a7de745 1185 return ret;
1c79356b 1186}
1187SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
1188 0, /* Pointer argument (arg1) */
1189 0, /* Integer argument (arg2) */
1190 sysctl_kdebug_ops, /* Handler function */
1191 NULL, /* Data pointer */
1192 "");
1c79356b 1193
1c79356b 1194
f427ee49 1195#if defined(XNU_TARGET_OS_OSX)
1c79356b 1196/*
1197 * Return the top *sizep bytes of the user stack, or the entire area of the
1198 * user stack down through the saved exec_path, whichever is smaller.
1c79356b 1199 */
1200STATIC int
1201sysctl_doprocargs SYSCTL_HANDLER_ARGS
1202{
1203 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1204 int *name = arg1; /* oid element argument vector */
1205 int namelen = arg2; /* number of oid element arguments */
1206 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1207 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1208// user_addr_t newp = req->newptr; /* user buffer copy in address */
1209// size_t newlen = req->newlen; /* user buffer copy in size */
1210 int error;
1211
1212 error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 0);
1213
1214 /* adjust index so we return the right required/consumed amount */
0a7de745 1215 if (!error) {
6d2010ae 1216 req->oldidx += req->oldlen;
0a7de745 1217 }
6d2010ae 1218
0a7de745 1219 return error;
55e303ae 1220}
1221SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
1222 0, /* Pointer argument (arg1) */
1223 0, /* Integer argument (arg2) */
1224 sysctl_doprocargs, /* Handler function */
1225 NULL, /* Data pointer */
1226 "");
f427ee49 1227#endif /* defined(XNU_TARGET_OS_OSX) */
1228
1229STATIC int
1230sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1231{
1232 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1233 int *name = arg1; /* oid element argument vector */
1234 int namelen = arg2; /* number of oid element arguments */
1235 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1236 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1237// user_addr_t newp = req->newptr; /* user buffer copy in address */
1238// size_t newlen = req->newlen; /* user buffer copy in size */
1239 int error;
55e303ae 1240
1241 error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 1);
1242
1243 /* adjust index so we return the right required/consumed amount */
0a7de745 1244 if (!error) {
6d2010ae 1245 req->oldidx += req->oldlen;
0a7de745 1246 }
6d2010ae 1247
0a7de745 1248 return error;
55e303ae 1249}
1250SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED,
1251 0, /* Pointer argument (arg1) */
1252 0, /* Integer argument (arg2) */
1253 sysctl_doprocargs2, /* Handler function */
1254 NULL, /* Data pointer */
1255 "");
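/*
 * kern.procargs2 is typically consumed from user space in two steps: size the
 * buffer first, then parse argc out of the leading int. A minimal sketch;
 * the function name is a placeholder and parsing of the strings area
 * (described in sysctl_procargsx() below) is left out.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdlib.h>
#include <string.h>

static void
procargs2_example(pid_t pid)
{
	int mib[3] = { CTL_KERN, KERN_PROCARGS2, pid };
	size_t len = 0;
	char *buf;
	int argc;

	if (sysctl(mib, 3, NULL, &len, NULL, 0) != 0 || len == 0) {
		return;
	}
	buf = malloc(len);
	if (buf != NULL && sysctl(mib, 3, buf, &len, NULL, 0) == 0) {
		memcpy(&argc, buf, sizeof(argc));  /* first word is argc */
		/* executable path and argv strings follow */
	}
	free(buf);
}
#endif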
55e303ae 1256
f427ee49 1257#define SYSCTL_PROCARGS_READ_ENVVARS_ENTITLEMENT "com.apple.private.read-environment-variables"
6d2010ae 1258STATIC int
1259sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
1260 size_t *sizep, proc_t cur_proc, int argc_yes)
1c79356b 1261{
1262 assert(sizep != NULL);
1263 proc_t p = NULL;
1264 size_t buflen = where != USER_ADDR_NULL ? *sizep : 0;
1c79356b 1265 int error = 0;
f427ee49 1266 struct _vm_map *proc_map = NULL;
1c79356b 1267 struct task * task;
f427ee49 1268 vm_map_copy_t tmp = NULL;
1269 user_addr_t arg_addr;
1270 size_t arg_size;
1c79356b 1271 caddr_t data;
0a7de745 1272 size_t argslen = 0;
1273 size_t size = 0;
1274 vm_offset_t copy_start = 0, copy_end;
1275 vm_offset_t smallbuffer_start;
1276 kern_return_t ret;
1277 int pid;
1278 kauth_cred_t my_cred;
1279 uid_t uid;
5ba3f43e 1280 int argc = -1;
1281 size_t argvsize;
1282 size_t remaining;
1283 size_t current_arg_index;
1284 size_t current_arg_len;
1285 const char * current_arg;
1286 bool omit_env_vars = true;
1c79356b 1287
0a7de745 1288 if (namelen < 1) {
1289 error = EINVAL;
1290 goto finish;
0a7de745 1291 }
1c79356b 1292
1293 if (argc_yes) {
1294 buflen -= sizeof(int); /* reserve first word to return argc */
1295 }
1296 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1297 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1298 /* is not NULL then the caller wants us to return the length needed to */
0a7de745 1299 /* hold the data we would return */
91447636 1300 if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1301 error = EINVAL;
1302 goto finish;
1c79356b 1303 }
1304
1305 /*
1306 * Lookup process by pid
1307 */
1308 pid = name[0];
2d21ac55 1309 p = proc_find(pid);
1c79356b 1310 if (p == NULL) {
1311 error = EINVAL;
1312 goto finish;
1313 }
1314
1315 /* Allow reading environment variables if any of the following are true:
1316 * - kernel is DEVELOPMENT || DEBUG
1317 * - target process is same as current_proc()
1318 * - target process is not cs_restricted
1319 * - SIP is off
1320 * - caller has an entitlement
1321 */
1322
1323#if DEVELOPMENT || DEBUG
1324 omit_env_vars = false;
1325#endif
1326 if (p == current_proc() ||
1327 !cs_restricted(p) ||
1328#if CONFIG_CSR
1329 csr_check(CSR_ALLOW_UNRESTRICTED_DTRACE) == 0 ||
1330#endif
1331 IOTaskHasEntitlement(current_task(), SYSCTL_PROCARGS_READ_ENVVARS_ENTITLEMENT)
1332 ) {
1333 omit_env_vars = false;
1334 }
1335
1336 /*
1337 * Copy the top N bytes of the stack.
1338 * On all machines we have so far, the stack grows
1339 * downwards.
1340 *
1341 * If the user expects no more than N bytes of
1342 * argument list, use that as a guess for the
1343 * size.
1344 */
1345
2d21ac55 1346 if (!p->user_stack) {
1347 error = EINVAL;
1348 goto finish;
2d21ac55 1349 }
1c79356b 1350
1351 /* save off argc before releasing the proc */
1352 argc = p->p_argc;
5ba3f43e 1353
1354 argslen = p->p_argslen;
1355
1356 /*
1357 * When these sysctls were introduced, the first string in the strings
1358 * section was just the bare path of the executable. However, for security
1359 * reasons we now prefix this string with executable_path= so it can be
 1360 * parsed getenv style. To avoid binary compatibility issues with existing
1361 * callers of this sysctl, we strip it off here.
1362 * (rdar://problem/13746466)
1363 */
1364#define EXECUTABLE_KEY "executable_path="
1365 argslen -= strlen(EXECUTABLE_KEY);
1366
1367 if (where == USER_ADDR_NULL && !omit_env_vars) {
1368 /* caller only wants to know length of proc args data.
1369 * If we don't need to omit environment variables, we can skip
1370 * copying the target process stack */
1371 goto calculate_size;
91447636 1372 }
5ba3f43e 1373
1374 my_cred = kauth_cred_proc_ref(p);
1375 uid = kauth_cred_getuid(my_cred);
1376 kauth_cred_unref(&my_cred);
1377
1378 if ((uid != kauth_cred_getuid(kauth_cred_get()))
1379 && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
1380 error = EINVAL;
1381 goto finish;
2d21ac55 1382 }
91447636 1383
f427ee49 1384 arg_size = round_page(argslen);
1385
1386 arg_addr = p->user_stack - arg_size;
1c79356b 1387
1388 /*
1389 * Before we can block (any VM code), make another
1390 * reference to the map to keep it alive. We do
1391 * that by getting a reference on the task itself.
1392 */
1393 task = p->task;
2d21ac55 1394 if (task == NULL) {
1395 error = EINVAL;
1396 goto finish;
2d21ac55 1397 }
5ba3f43e 1398
0b4e3aa0 1399 /*
1400 * Once we have a task reference we can convert that into a
1401 * map reference, which we will use in the calls below. The
1402 * task/process may change its map after we take this reference
1403 * (see execve), but the worst that will happen then is a return
1404 * of stale info (which is always a possibility).
0b4e3aa0 1405 */
91447636 1406 task_reference(task);
2d21ac55 1407 proc_rele(p);
f427ee49 1408 p = NULL;
1409 proc_map = get_task_map_reference(task);
1410 task_deallocate(task);
1411
1412 if (proc_map == NULL) {
1413 error = EINVAL;
1414 goto finish;
0a7de745 1415 }
1c79356b 1416
f427ee49 1417 ret = kmem_alloc(kernel_map, &copy_start, arg_size, VM_KERN_MEMORY_BSD);
1c79356b 1418 if (ret != KERN_SUCCESS) {
1419 error = ENOMEM;
1420 goto finish;
1c79356b 1421 }
f427ee49 1422 bzero((void *)copy_start, arg_size);
1c79356b 1423
1424 /* End of buffer should be page aligned */
1425 assert(copy_start + arg_size == round_page(copy_start + arg_size));
1426 copy_end = copy_start + arg_size;
1c79356b 1427
1428 if (vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
1429 (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
1430 error = EIO;
1431 goto finish;
1432 }
1433
1434 /*
1435 * Now that we've done the copyin from the process'
1436 * map, we can release the reference to it.
1437 */
91447636 1438 vm_map_deallocate(proc_map);
f427ee49 1439 proc_map = NULL;
1c79356b 1440
1441 if (vm_map_copy_overwrite(kernel_map,
1442 (vm_map_address_t)copy_start,
eb6b6ca3 1443 tmp, (vm_map_size_t) arg_size, FALSE) != KERN_SUCCESS) {
1444 error = EIO;
1445 goto finish;
1446 }
1447 /* tmp was consumed */
1448 tmp = NULL;
1449
1450 if (omit_env_vars) {
1451 argvsize = 0;
1452
1453 /* Iterate over everything in argv, plus one for the bare executable path */
1454 for (current_arg_index = 0; current_arg_index < argc + 1 && argvsize < argslen; ++current_arg_index) {
1455 current_arg = (const char *)(copy_end - argslen) + argvsize;
1456 remaining = argslen - argvsize;
1457 current_arg_len = strnlen(current_arg, remaining);
1458 if (current_arg_len < remaining) {
1459 /* We have space for the null terminator */
1460 current_arg_len += 1;
1461
1462 if (current_arg_index == 0) {
1463 /* The bare executable path may have multiple null bytes after it for alignment */
1464 while (current_arg_len < remaining && current_arg[current_arg_len] == 0) {
1465 current_arg_len += 1;
1466 }
1467 }
1468 }
1469 argvsize += current_arg_len;
1470 }
1471 assert(argvsize <= argslen);
1472
1473 /* Adjust argslen and copy_end to make the copyout range extend to the end of argv */
1474 copy_end = copy_end - argslen + argvsize;
1475 argslen = argvsize;
1476 }
1477
1478 if (where == USER_ADDR_NULL) {
1479 /* Skip copyout */
1480 goto calculate_size;
1481 }
1482
f427ee49 1483 if (buflen >= argslen) {
1484 data = (caddr_t) (copy_end - argslen);
1485 size = argslen;
55e303ae 1486 } else {
1487 /*
1488 * Before rdar://25397314, this function contained incorrect logic when buflen is less
1489 * than argslen. The problem was that it copied in `buflen` bytes from the end of the target
1490 * process user stack into the beginning of a buffer of size round_page(buflen), and then
1491 * copied out `buflen` bytes from the end of this buffer. The effect of this was that
1492 * the caller of this sysctl would get zeros at the end of their buffer.
1493 *
1494 * To preserve this behavior, bzero everything from copy_end-round_page(buflen)+buflen to the
1495 * end of the buffer. This emulates copying in only `buflen` bytes.
1496 *
1497 *
1498 * In the old code:
1499 *
1500 * copy_start .... size: round_page(buflen) .... copy_end
1501 * [---copied in data (size: buflen)---|--- zeros ----------]
1502 * ^
1503 * data = copy_end - buflen
1504 *
1505 *
1506 * In the new code:
1507 * copy_start .... size: round_page(p->argslen) .... full copy_end
1508 * ^ ....................... p->argslen ...............................^
1509 * ^ ^ truncated copy_end ^
1510 * ^ ^ ^ ^
1511 * ^ ................ argslen ........................ ^
1512 * ^ ^ ^ ^
1513 * [-------copied in data (size: round_page(p->argslen))-------:----env vars---]
1514 * ^ ^
1515 * ^ data = copy_end - buflen
1516 * smallbuffer_start = max(copy_end - round_page(buflen), copy_start)
1517 *
1518 *
1519 * Full copy_end: copy_end calculated from copy_start + round_page(p->argslen)
1520 * Truncated copy_end: copy_end after truncation to remove environment variables.
1521 *
1522 * If environment variables were omitted, then we use the truncated copy_end, otherwise
1523 * we use full copy_end.
1524 *
1525 * smallbuffer_start: represents where copy_start would be in the old code.
1526 * data: The beginning of the region we copyout
1527 */
1528 smallbuffer_start = copy_end - round_page(buflen);
1529 if (smallbuffer_start < copy_start) {
1530 smallbuffer_start = copy_start;
1531 }
1532 bzero((void *)(smallbuffer_start + buflen), copy_end - (smallbuffer_start + buflen));
1533 data = (caddr_t) (copy_end - buflen);
1534 size = buflen;
3e170ce0
A
1535 }
1536
55e303ae
A
1537 if (argc_yes) {
1538 /* Put the process's argc as the first word in the copyout buffer */
5ba3f43e 1539 suword(where, argc);
91447636
A
1540 error = copyout(data, (where + sizeof(int)), size);
1541 size += sizeof(int);
55e303ae
A
1542 } else {
1543 error = copyout(data, where, size);
1544
1545 /*
1546 * Make the old PROCARGS work to return the executable's path
1547 * But, only if there is enough space in the provided buffer
1548 *
0a7de745
A
1549 * on entry: data [possibly] points to the beginning of the path
1550 *
55e303ae
A
1551 * Note: we keep all pointers & sizes aligned to word boundaries
1552 */
f427ee49 1553 if ((!error) && (buflen > 0 && (u_int)buflen > size)) {
91447636 1554 int binPath_sz, alignedBinPath_sz = 0;
55e303ae 1555 int extraSpaceNeeded, addThis;
91447636 1556 user_addr_t placeHere;
55e303ae 1557 char * str = (char *) data;
f427ee49 1558 size_t max_len = size;
55e303ae
A
1559
1560 /* Some apps are really bad about messing up their stacks
0a7de745
A
1561 * So, we have to be extra careful about getting the length
1562 * of the executing binary. If we encounter an error, we bail.
1563 */
55e303ae
A
1564
1565 /* Limit ourselves to PATH_MAX paths */
0a7de745
A
1566 if (max_len > PATH_MAX) {
1567 max_len = PATH_MAX;
1568 }
55e303ae
A
1569
1570 binPath_sz = 0;
1571
0a7de745 1572 while ((binPath_sz < max_len - 1) && (*str++ != 0)) {
55e303ae 1573 binPath_sz++;
0a7de745 1574 }
55e303ae 1575
91447636 1576 /* If we have a NUL terminator, copy it, too */
0a7de745
A
1577 if (binPath_sz < max_len - 1) {
1578 binPath_sz += 1;
1579 }
55e303ae
A
1580
1581 /* Pre-flight the space requirements */
1582
1583 /* Account for the padding that fills out binPath to the next word */
0a7de745 1584 alignedBinPath_sz += (binPath_sz & (sizeof(int) - 1)) ? (sizeof(int) - (binPath_sz & (sizeof(int) - 1))) : 0;
55e303ae
A
1585
1586 placeHere = where + size;
1587
0a7de745
A
1588 /* Account for the bytes needed to keep placeHere word aligned */
1589 addThis = (placeHere & (sizeof(int) - 1)) ? (sizeof(int) - (placeHere & (sizeof(int) - 1))) : 0;
55e303ae
A
1590
1591 /* Add up all the space that is needed */
91447636 1592 extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
55e303ae
A
1593
1594 /* is there room to tack on argv[0]? */
f427ee49 1595 if ((buflen & ~(sizeof(int) - 1)) >= (size + extraSpaceNeeded)) {
55e303ae
A
1596 placeHere += addThis;
1597 suword(placeHere, 0);
91447636 1598 placeHere += sizeof(int);
55e303ae 1599 suword(placeHere, 0xBFFF0000);
91447636 1600 placeHere += sizeof(int);
55e303ae 1601 suword(placeHere, 0);
91447636 1602 placeHere += sizeof(int);
55e303ae 1603 error = copyout(data, placeHere, binPath_sz);
0a7de745 1604 if (!error) {
55e303ae
A
1605 placeHere += binPath_sz;
1606 suword(placeHere, 0);
1607 size += extraSpaceNeeded;
1608 }
1609 }
1610 }
1611 }
1612
f427ee49
A
1613calculate_size:
1614 /* Size has already been calculated for the where != NULL case */
1615 if (where == USER_ADDR_NULL) {
1616 size = argslen;
1617 if (argc_yes) {
1618 size += sizeof(int);
1619 } else {
1620 /*
1621 * old PROCARGS will return the executable's path plus some
1622 * extra space for word alignment and data tags
1623 */
1624 size += PATH_MAX + (6 * sizeof(int));
1625 }
1626 size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1c79356b
A
1627 }
1628
f427ee49
A
1629 *sizep = size;
1630
1631finish:
1632 if (p != NULL) {
1633 proc_rele(p);
0a7de745 1634 }
f427ee49
A
1635 if (tmp != NULL) {
1636 vm_map_copy_discard(tmp);
1637 }
1638 if (proc_map != NULL) {
1639 vm_map_deallocate(proc_map);
1640 }
1641 if (copy_start != (vm_offset_t) 0) {
1642 kmem_free(kernel_map, copy_start, arg_size);
1643 }
1644 return error;
1c79356b 1645}
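/*
 * Usage sketch (editor's addition, not part of xnu): how the argc_yes path
 * above is typically consumed from userspace via KERN_PROCARGS2.  The layout
 * assumed here -- argc in the leading int, followed by the executable path
 * and the packed argument strings -- matches the copyout logic above.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
print_proc_argc(pid_t pid)
{
	int mib[3] = { CTL_KERN, KERN_PROCARGS2, (int)pid };
	size_t size = 0;
	int argc;
	char *buf;

	/* First call with a NULL buffer asks the kernel for the needed size. */
	if (sysctl(mib, 3, NULL, &size, NULL, 0) != 0) {
		return -1;
	}
	if ((buf = malloc(size)) == NULL) {
		return -1;
	}
	if (sysctl(mib, 3, buf, &size, NULL, 0) != 0) {
		free(buf);
		return -1;
	}
	memcpy(&argc, buf, sizeof(argc));       /* argc is the first word */
	printf("pid %d: argc=%d exec=%s\n", (int)pid, argc, buf + sizeof(int));
	free(buf);
	return 0;
}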
55e303ae
A
1646
1647
1648/*
2d21ac55 1649 * Max number of concurrent aio requests
55e303ae 1650 */
6d2010ae 1651STATIC int
2d21ac55
A
1652sysctl_aiomax
1653(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
55e303ae 1654{
2d21ac55
A
1655 int new_value, changed;
1656 int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
1657 if (changed) {
0a7de745
A
1658 /* make sure the system-wide limit is at least the per-process limit */
1659 if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS) {
55e303ae 1660 aio_max_requests = new_value;
0a7de745 1661 } else {
55e303ae 1662 error = EINVAL;
0a7de745 1663 }
55e303ae 1664 }
0a7de745 1665 return error;
2d21ac55 1666}
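/*
 * Usage sketch (editor's addition, not part of xnu): because the handler
 * above rejects values below the per-process limit, a tool raising both AIO
 * limits has to grow kern.aiomax before kern.aioprocmax.
 */
#include <sys/sysctl.h>

static int
raise_aio_limits(int system_wide, int per_process)
{
	/* grow the system-wide limit first so the ordering check above passes */
	if (sysctlbyname("kern.aiomax", NULL, NULL, &system_wide, sizeof(system_wide)) != 0) {
		return -1;
	}
	return sysctlbyname("kern.aioprocmax", NULL, NULL, &per_process, sizeof(per_process));
}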
55e303ae
A
1667
1668
1669/*
2d21ac55 1670 * Max number of concurrent aio requests per process
55e303ae 1671 */
6d2010ae 1672STATIC int
2d21ac55
A
1673sysctl_aioprocmax
1674(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
55e303ae 1675{
2d21ac55
A
1676 int new_value, changed;
1677 int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
1678 if (changed) {
1679 /* make sure the per-process limit does not exceed the system-wide limit */
0a7de745 1680 if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX) {
55e303ae 1681 aio_max_requests_per_process = new_value;
0a7de745 1682 } else {
55e303ae 1683 error = EINVAL;
0a7de745 1684 }
55e303ae 1685 }
0a7de745 1686 return error;
2d21ac55 1687}
55e303ae
A
1688
1689
1690/*
2d21ac55 1691 * Max number of async IO worker threads
55e303ae 1692 */
6d2010ae 1693STATIC int
2d21ac55
A
1694sysctl_aiothreads
1695(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
55e303ae 1696{
2d21ac55
A
1697 int new_value, changed;
1698 int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
1699 if (changed) {
1700 /* we only allow an increase in the number of worker threads */
0a7de745
A
1701 if (new_value > aio_worker_threads) {
1702 _aio_create_worker_threads((new_value - aio_worker_threads));
55e303ae 1703 aio_worker_threads = new_value;
0a7de745
A
1704 } else {
1705 error = EINVAL;
55e303ae 1706 }
55e303ae 1707 }
0a7de745 1708 return error;
2d21ac55 1709}
55e303ae
A
1710
1711
1712/*
2d21ac55 1713 * System-wide limit on the max number of processes
55e303ae 1714 */
6d2010ae 1715STATIC int
2d21ac55
A
1716sysctl_maxproc
1717(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
55e303ae 1718{
2d21ac55
A
1719 int new_value, changed;
1720 int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
1721 if (changed) {
b0d623f7 1722 AUDIT_ARG(value32, new_value);
2d21ac55 1723 /* make sure the system-wide limit does not exceed the configured hard
0a7de745
A
1724 * limit set at kernel compilation */
1725 if (new_value <= hard_maxproc && new_value > 0) {
2d21ac55 1726 maxproc = new_value;
0a7de745 1727 } else {
55e303ae 1728 error = EINVAL;
0a7de745
A
1729 }
1730 }
1731 return error;
1732}
1733
1734extern int sched_enable_smt;
1735STATIC int
1736sysctl_sched_enable_smt
1737(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1738{
1739 int new_value, changed;
1740 int error = sysctl_io_number(req, sched_enable_smt, sizeof(int), &new_value, &changed);
1741 if (error) {
1742 return error;
1743 }
1744 kern_return_t kret = KERN_SUCCESS;
1745 if (changed) {
1746 AUDIT_ARG(value32, new_value);
1747 if (new_value == 0) {
1748 sched_enable_smt = 0;
1749 kret = enable_smt_processors(false);
1750 } else {
1751 sched_enable_smt = 1;
1752 kret = enable_smt_processors(true);
1753 }
1754 }
1755 switch (kret) {
1756 case KERN_SUCCESS:
1757 error = 0;
1758 break;
1759 case KERN_INVALID_ARGUMENT:
1760 error = EINVAL;
1761 break;
1762 case KERN_FAILURE:
1763 error = EBUSY;
1764 break;
1765 default:
1766 error = ENOENT;
1767 break;
55e303ae 1768 }
0a7de745
A
1769
1770 return error;
2d21ac55 1771}
55e303ae 1772
0a7de745
A
1773SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
1774 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1775 ostype, 0, "");
1776SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
1777 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1778 osrelease, 0, "");
1779SYSCTL_INT(_kern, KERN_OSREV, osrevision,
1780 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1781 (int *)NULL, BSD, "");
1782SYSCTL_STRING(_kern, KERN_VERSION, version,
1783 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1784 version, 0, "");
1785SYSCTL_STRING(_kern, OID_AUTO, uuid,
1786 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1787 &kernel_uuid_string[0], 0, "");
d9a64523
A
1788
1789SYSCTL_STRING(_kern, OID_AUTO, osbuildconfig,
0a7de745
A
1790 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_MASKED,
1791 &osbuild_config[0], 0, "");
1792
cb323159
A
1793STATIC int
1794sysctl_protoboot(__unused struct sysctl_oid *oidp,
1795 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1796{
1797 int error = -1;
1798#if CONFIG_LOCKERBOOT
1799 char protoboot_buff[24];
1800 size_t protoboot_len = sizeof(protoboot_buff);
1801
1802 if (vnode_tag(rootvnode) == VT_LOCKERFS) {
1803 strlcpy(protoboot_buff, kernel_protoboot_mount, protoboot_len);
1804 error = sysctl_io_string(req, protoboot_buff, protoboot_len, 0, NULL);
1805 } else {
1806 error = EFTYPE;
1807 }
1808
1809#else
1810#pragma unused(req)
1811 error = ENOTSUP;
1812#endif
1813
1814 return error;
1815}
1816
1817SYSCTL_PROC(_kern, OID_AUTO, protoboot,
1818 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
1819 0, 0, sysctl_protoboot, "A", "");
d9a64523 1820
b0d623f7 1821#if DEBUG
5c9f4661
A
1822#ifndef DKPR
1823#define DKPR 1
1824#endif
1825#endif
1826
1827#if DKPR
b0d623f7 1828int debug_kprint_syscall = 0;
0a7de745 1829char debug_kprint_syscall_process[MAXCOMLEN + 1];
b0d623f7 1830
6d2010ae 1831/* Thread safe: bits and string value are not used to reclaim state */
0a7de745
A
1832SYSCTL_INT(_debug, OID_AUTO, kprint_syscall,
1833 CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
1834SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
1835 CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
1836 "name of process for kprintf syscall tracing");
b0d623f7 1837
0a7de745
A
1838int
1839debug_kprint_current_process(const char **namep)
b0d623f7
A
1840{
1841 struct proc *p = current_proc();
1842
1843 if (p == NULL) {
1844 return 0;
1845 }
1846
1847 if (debug_kprint_syscall_process[0]) {
1848 /* user asked to scope tracing to a particular process name */
0a7de745
A
1849 if (0 == strncmp(debug_kprint_syscall_process,
1850 p->p_comm, sizeof(debug_kprint_syscall_process))) {
b0d623f7 1851 /* no value in telling the user that we traced what they asked */
0a7de745
A
1852 if (namep) {
1853 *namep = NULL;
1854 }
b0d623f7
A
1855
1856 return 1;
1857 } else {
1858 return 0;
1859 }
1860 }
1861
1862 /* trace all processes. Tell user what we traced */
1863 if (namep) {
1864 *namep = p->p_comm;
1865 }
1866
1867 return 1;
1868}
1869#endif
1870
2d21ac55
A
1871/* PR-5293665: need to use a callback function for kern.osversion to set
1872 * osversion in IORegistry */
55e303ae 1873
6d2010ae 1874STATIC int
2d21ac55 1875sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
55e303ae 1876{
0a7de745 1877 int rval = 0;
55e303ae 1878
0a7de745 1879 rval = sysctl_handle_string(oidp, arg1, arg2, req);
2d21ac55 1880
0a7de745
A
1881 if (req->newptr) {
1882 IORegistrySetOSBuildVersion((char *)arg1);
1883 }
2d21ac55 1884
0a7de745 1885 return rval;
2d21ac55
A
1886}
1887
1888SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
0a7de745
A
1889 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
1890 osversion, 256 /* OSVERSIZE*/,
1891 sysctl_osversion, "A", "");
2d21ac55 1892
f427ee49
A
1893static bool
1894_already_set_or_not_launchd(struct sysctl_req *req, char *val)
a39ff7e2
A
1895{
1896 if (req->newptr != 0) {
1897 /*
1898 * Can only ever be set by launchd, and only once at boot.
1899 */
f427ee49
A
1900 if (req->p->p_pid != 1 || val[0] != '\0') {
1901 return true;
a39ff7e2
A
1902 }
1903 }
f427ee49
A
1904 return false;
1905}
1906
1907#if XNU_TARGET_OS_OSX
1908static int
1909sysctl_system_version_compat
1910(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1911{
1912 int oldval = (task_has_system_version_compat_enabled(current_task()));
1913 int new_value = 0, changed = 0;
1914
1915 int error = sysctl_io_number(req, oldval, sizeof(int), &new_value, &changed);
1916 if (changed) {
1917 task_set_system_version_compat_enabled(current_task(), (new_value));
1918 }
1919 return error;
1920}
1921
1922SYSCTL_PROC(_kern, OID_AUTO, system_version_compat,
1923 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
1924 0, 0, sysctl_system_version_compat, "A", "");
1925
1926char osproductversioncompat[48] = { '\0' };
1927
1928static int
1929sysctl_osproductversioncompat(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1930{
1931 if (_already_set_or_not_launchd(req, osproductversioncompat)) {
1932 return EPERM;
1933 }
1934 return sysctl_handle_string(oidp, arg1, arg2, req);
1935}
1936
1937
1938SYSCTL_PROC(_kern, OID_AUTO, osproductversioncompat,
1939 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
1940 osproductversioncompat, sizeof(osproductversioncompat),
1941 sysctl_osproductversioncompat, "A", "The ProductVersion from SystemVersionCompat.plist");
1942#endif
1943
1944char osproductversion[48] = { '\0' };
1945
1946static int
1947sysctl_osproductversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1948{
1949 if (_already_set_or_not_launchd(req, osproductversion)) {
1950 return EPERM;
1951 }
a39ff7e2 1952
f427ee49 1953#if !XNU_TARGET_OS_OSX
0a7de745 1954 return sysctl_handle_string(oidp, arg1, arg2, req);
f427ee49
A
1955#else
1956 if (task_has_system_version_compat_enabled(current_task()) && (osproductversioncompat[0] != '\0')) {
1957 return sysctl_handle_string(oidp, osproductversioncompat, arg2, req);
1958 } else {
1959 return sysctl_handle_string(oidp, arg1, arg2, req);
1960 }
1961#endif
a39ff7e2
A
1962}
1963
f427ee49
A
1964#if XNU_TARGET_OS_OSX
1965static_assert(sizeof(osproductversioncompat) == sizeof(osproductversion),
1966 "osproductversion size matches osproductversioncompat size");
1967#endif
1968
a39ff7e2 1969SYSCTL_PROC(_kern, OID_AUTO, osproductversion,
0a7de745
A
1970 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
1971 osproductversion, sizeof(osproductversion),
1972 sysctl_osproductversion, "A", "The ProductVersion from SystemVersion.plist");
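/*
 * Usage sketch (editor's addition, not part of xnu): kern.osproductversion is
 * write-once by launchd (pid 1); any other caller can only read it.
 */
#include <sys/sysctl.h>
#include <stdio.h>

static void
print_osproductversion(void)
{
	char vers[48] = "";
	size_t len = sizeof(vers);

	if (sysctlbyname("kern.osproductversion", vers, &len, NULL, 0) == 0) {
		printf("ProductVersion: %s\n", vers);
	}
}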
a39ff7e2 1973
f427ee49
A
1974char osreleasetype[48] = { '\0' };
1975
1976STATIC int
1977sysctl_osreleasetype(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1978{
1979 if (_already_set_or_not_launchd(req, osreleasetype)) {
1980 return EPERM;
1981 }
1982 return sysctl_handle_string(oidp, arg1, arg2, req);
1983}
1984
1985void reset_osreleasetype(void);
1986
1987void
1988reset_osreleasetype(void)
1989{
1990 memset(osreleasetype, 0, sizeof(osreleasetype));
1991}
1992
1993SYSCTL_PROC(_kern, OID_AUTO, osreleasetype,
1994 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
1995 osreleasetype, sizeof(osreleasetype),
1996 sysctl_osreleasetype, "A", "The ReleaseType from SystemVersion.plist");
1997
cb323159
A
1998static uint64_t iossupportversion_string[48];
1999
2000STATIC int
2001sysctl_iossupportversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2002{
2003 if (req->newptr != 0) {
2004 /*
2005 * Can only ever be set by launchd, and only once at boot.
2006 */
2007 if (req->p->p_pid != 1 || iossupportversion_string[0] != '\0') {
2008 return EPERM;
2009 }
2010 }
2011
2012 return sysctl_handle_string(oidp, arg1, arg2, req);
2013}
2014
2015SYSCTL_PROC(_kern, OID_AUTO, iossupportversion,
2016 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
2017 iossupportversion_string, sizeof(iossupportversion_string),
2018 sysctl_iossupportversion, "A", "The iOSSupportVersion from SystemVersion.plist");
2019
5ba3f43e
A
2020static uint64_t osvariant_status = 0;
2021
2022STATIC int
2023sysctl_osvariant_status(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2024{
2025 if (req->newptr != 0) {
2026 /*
f427ee49
A
2027 * Can only ever be set by launchd, and only once.
2028 * Reset by usrctl() -> reset_osvariant_status() during
2029 * userspace reboot, since userspace could reboot into
2030 * a different variant.
5ba3f43e
A
2031 */
2032 if (req->p->p_pid != 1 || osvariant_status != 0) {
2033 return EPERM;
2034 }
2035 }
2036
0a7de745 2037 return sysctl_handle_quad(oidp, arg1, arg2, req);
5ba3f43e
A
2038}
2039
2040SYSCTL_PROC(_kern, OID_AUTO, osvariant_status,
0a7de745
A
2041 CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
2042 &osvariant_status, sizeof(osvariant_status),
2043 sysctl_osvariant_status, "Q", "Opaque flags used to cache OS variant information");
2044
f427ee49
A
2045void reset_osvariant_status(void);
2046
2047void
2048reset_osvariant_status(void)
2049{
2050 osvariant_status = 0;
2051}
2052
cb323159 2053extern void commpage_update_dyld_flags(uint64_t);
f427ee49 2054uint64_t dyld_flags = 0;
cb323159
A
2055
2056STATIC int
f427ee49 2057sysctl_dyld_flags(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
cb323159
A
2058{
2059 /*
2060 * Can only ever be set by launchd, possibly several times
2061 * as dyld may change its mind after a userspace reboot.
2062 */
2063 if (req->newptr != 0 && req->p->p_pid != 1) {
2064 return EPERM;
2065 }
2066
2067 int res = sysctl_handle_quad(oidp, arg1, arg2, req);
2068 if (req->newptr && res == 0) {
f427ee49 2069 commpage_update_dyld_flags(dyld_flags);
cb323159
A
2070 }
2071 return res;
2072}
2073
f427ee49 2074SYSCTL_PROC(_kern, OID_AUTO, dyld_flags,
cb323159 2075 CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
f427ee49
A
2076 &dyld_flags, sizeof(dyld_flags),
2077 sysctl_dyld_flags, "Q", "Opaque flags used to cache dyld system-wide configuration");
cb323159 2078
0a7de745
A
2079#if defined(XNU_TARGET_OS_BRIDGE)
2080char macosproductversion[MACOS_VERS_LEN] = { '\0' };
2081
2082SYSCTL_STRING(_kern, OID_AUTO, macosproductversion,
2083 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2084 &macosproductversion[0], MACOS_VERS_LEN, "The currently running macOS ProductVersion (from SystemVersion.plist on macOS)");
2085
2086char macosversion[MACOS_VERS_LEN] = { '\0' };
2087
2088SYSCTL_STRING(_kern, OID_AUTO, macosversion,
2089 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2090 &macosversion[0], MACOS_VERS_LEN, "The currently running macOS build version");
2091#endif
5ba3f43e 2092
6d2010ae 2093STATIC int
2d21ac55
A
2094sysctl_sysctl_bootargs
2095(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2096{
2097 int error;
cb323159 2098 char buf[BOOT_LINE_LENGTH];
2d21ac55 2099
cb323159
A
2100 strlcpy(buf, PE_boot_args(), BOOT_LINE_LENGTH);
2101 error = sysctl_io_string(req, buf, BOOT_LINE_LENGTH, 0, NULL);
0a7de745 2102 return error;
2d21ac55
A
2103}
2104
2105SYSCTL_PROC(_kern, OID_AUTO, bootargs,
0a7de745
A
2106 CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
2107 NULL, 0,
2108 sysctl_sysctl_bootargs, "A", "bootargs");
2d21ac55 2109
d9a64523
A
2110STATIC int
2111sysctl_kernelcacheuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2112{
0a7de745
A
2113 int rval = ENOENT;
2114 if (kernelcache_uuid_valid) {
2115 rval = sysctl_handle_string(oidp, arg1, arg2, req);
2116 }
2117 return rval;
d9a64523
A
2118}
2119
2120SYSCTL_PROC(_kern, OID_AUTO, kernelcacheuuid,
0a7de745
A
2121 CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
2122 kernelcache_uuid_string, sizeof(kernelcache_uuid_string),
2123 sysctl_kernelcacheuuid, "A", "");
2124
f427ee49
A
2125STATIC int
2126sysctl_systemfilesetuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2127{
2128 int rval = ENOENT;
2129 if (pageablekc_uuid_valid) {
2130 rval = sysctl_handle_string(oidp, arg1, arg2, req);
2131 }
2132 return rval;
2133}
2134
2135SYSCTL_PROC(_kern, OID_AUTO, systemfilesetuuid,
2136 CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
2137 pageablekc_uuid_string, sizeof(pageablekc_uuid_string),
2138 sysctl_systemfilesetuuid, "A", "");
2139
2140STATIC int
2141sysctl_auxiliaryfilesetuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2142{
2143 int rval = ENOENT;
2144 if (auxkc_uuid_valid) {
2145 rval = sysctl_handle_string(oidp, arg1, arg2, req);
2146 }
2147 return rval;
2148}
2149
2150SYSCTL_PROC(_kern, OID_AUTO, auxiliaryfilesetuuid,
2151 CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
2152 auxkc_uuid_string, sizeof(auxkc_uuid_string),
2153 sysctl_auxiliaryfilesetuuid, "A", "");
2154
2155STATIC int
2156sysctl_filesetuuid(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2157{
2158 int rval = ENOENT;
2159 kc_format_t kcformat;
2160 kernel_mach_header_t *mh;
2161 void *uuid = NULL;
2162 unsigned long uuidlen = 0;
2163 uuid_string_t uuid_str;
2164
2165 if (!PE_get_primary_kc_format(&kcformat) || kcformat != KCFormatFileset) {
2166 return rval;
2167 }
2168
2169 mh = (kernel_mach_header_t *)PE_get_kc_header(KCKindPrimary);
2170 uuid = getuuidfromheader(mh, &uuidlen);
2171
2172 if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
2173 uuid_unparse_upper(*(uuid_t *)uuid, uuid_str);
2174 rval = sysctl_io_string(req, (char *)uuid_str, sizeof(uuid_str), 0, NULL);
2175 }
2176
2177 return rval;
2178}
2179
2180SYSCTL_PROC(_kern, OID_AUTO, filesetuuid,
2181 CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
2182 NULL, 0,
2183 sysctl_filesetuuid, "A", "");
2184
2185
0a7de745
A
2186SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
2187 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2188 &maxfiles, 0, "");
2189SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
2190 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2191 (int *)NULL, ARG_MAX, "");
2192SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
2193 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2194 (int *)NULL, _POSIX_VERSION, "");
2195SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
2196 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2197 (int *)NULL, NGROUPS_MAX, "");
2198SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
2199 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2200 (int *)NULL, 1, "");
2201#if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
2202SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
2203 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2204 (int *)NULL, 1, "");
2d21ac55 2205#else
0a7de745
A
2206SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
2207 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2208 NULL, 0, "");
2d21ac55 2209#endif
0a7de745
A
2210SYSCTL_INT(_kern, OID_AUTO, num_files,
2211 CTLFLAG_RD | CTLFLAG_LOCKED,
2212 &nfiles, 0, "");
2213SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
2214 CTLFLAG_RD | CTLFLAG_LOCKED,
2215 &numvnodes, 0, "");
2216SYSCTL_INT(_kern, OID_AUTO, num_tasks,
2217 CTLFLAG_RD | CTLFLAG_LOCKED,
2218 &task_max, 0, "");
2219SYSCTL_INT(_kern, OID_AUTO, num_threads,
2220 CTLFLAG_RD | CTLFLAG_LOCKED,
2221 &thread_max, 0, "");
2222SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
2223 CTLFLAG_RD | CTLFLAG_LOCKED,
2224 &task_threadmax, 0, "");
2225SYSCTL_LONG(_kern, OID_AUTO, num_recycledvnodes,
2226 CTLFLAG_RD | CTLFLAG_LOCKED,
2227 &num_recycledvnodes, "");
c3c9b80d
A
2228SYSCTL_COMPAT_INT(_kern, OID_AUTO, free_vnodes,
2229 CTLFLAG_RD | CTLFLAG_LOCKED,
2230 &freevnodes, 0, "");
2d21ac55 2231
6d2010ae 2232STATIC int
0a7de745 2233sysctl_maxvnodes(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2d21ac55 2234{
b0d623f7 2235 int oldval = desiredvnodes;
2d21ac55 2236 int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
b0d623f7
A
2237
2238 if (oldval != desiredvnodes) {
b0d623f7
A
2239 resize_namecache(desiredvnodes);
2240 }
2241
0a7de745 2242 return error;
2d21ac55
A
2243}
2244
0a7de745
A
2245SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
2246 CTLFLAG_RW | CTLFLAG_LOCKED,
2247 &nc_disabled, 0, "");
6d2010ae 2248
2d21ac55 2249SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
0a7de745
A
2250 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2251 0, 0, sysctl_maxvnodes, "I", "");
2d21ac55
A
2252
2253SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
0a7de745
A
2254 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2255 0, 0, sysctl_maxproc, "I", "");
2d21ac55
A
2256
2257SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
0a7de745
A
2258 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2259 0, 0, sysctl_aiomax, "I", "");
2d21ac55
A
2260
2261SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
0a7de745
A
2262 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2263 0, 0, sysctl_aioprocmax, "I", "");
2d21ac55
A
2264
2265SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
0a7de745
A
2266 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2267 0, 0, sysctl_aiothreads, "I", "");
2268
2269SYSCTL_PROC(_kern, OID_AUTO, sched_enable_smt,
2270 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN,
2271 0, 0, sysctl_sched_enable_smt, "I", "");
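/*
 * Usage sketch (editor's addition, not part of xnu): toggling SMT from a
 * privileged tool.  Per the switch above, a KERN_FAILURE from
 * enable_smt_processors() is reported to the caller as EBUSY.
 */
#include <sys/sysctl.h>

static int
set_smt_enabled(int enable)     /* 0 = take secondary SMT threads offline */
{
	return sysctlbyname("kern.sched_enable_smt", NULL, NULL, &enable, sizeof(enable));
}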
2272
2273extern int sched_allow_NO_SMT_threads;
2274SYSCTL_INT(_kern, OID_AUTO, sched_allow_NO_SMT_threads,
2275 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2276 &sched_allow_NO_SMT_threads, 0, "");
2d21ac55 2277
fe8ab488 2278#if (DEVELOPMENT || DEBUG)
f427ee49
A
2279extern int smt_sched_bonus_16ths;
2280SYSCTL_INT(_kern, OID_AUTO, smt_sched_bonus_16ths,
2281 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2282 &smt_sched_bonus_16ths, 0, "");
2283
2284extern int smt_timeshare_enabled;
2285SYSCTL_INT(_kern, OID_AUTO, sched_smt_timeshare_enable,
2286 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2287 &smt_timeshare_enabled, 0, "");
2288
fe8ab488 2289extern int sched_smt_balance;
0a7de745
A
2290SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance,
2291 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2292 &sched_smt_balance, 0, "");
a39ff7e2 2293extern int sched_allow_rt_smt;
0a7de745
A
2294SYSCTL_INT(_kern, OID_AUTO, sched_allow_rt_smt,
2295 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2296 &sched_allow_rt_smt, 0, "");
2297extern int sched_avoid_cpu0;
2298SYSCTL_INT(_kern, OID_AUTO, sched_avoid_cpu0,
2299 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2300 &sched_avoid_cpu0, 0, "");
5ba3f43e
A
2301#if __arm__ || __arm64__
2302extern uint32_t perfcontrol_requested_recommended_cores;
2303SYSCTL_UINT(_kern, OID_AUTO, sched_recommended_cores,
0a7de745
A
2304 CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
2305 &perfcontrol_requested_recommended_cores, 0, "");
5ba3f43e
A
2306
2307/* Scheduler perfcontrol callouts sysctls */
2308SYSCTL_DECL(_kern_perfcontrol_callout);
2309SYSCTL_NODE(_kern, OID_AUTO, perfcontrol_callout, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
0a7de745 2310 "scheduler perfcontrol callouts");
5ba3f43e
A
2311
2312extern int perfcontrol_callout_stats_enabled;
0a7de745
A
2313SYSCTL_INT(_kern_perfcontrol_callout, OID_AUTO, stats_enabled,
2314 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2315 &perfcontrol_callout_stats_enabled, 0, "");
5ba3f43e
A
2316
2317extern uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
0a7de745 2318 perfcontrol_callout_stat_t stat);
5ba3f43e
A
2319
2320/* On-Core Callout */
2321STATIC int
2322sysctl_perfcontrol_callout_stat
2323(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
2324{
2325 perfcontrol_callout_stat_t stat = (perfcontrol_callout_stat_t)arg1;
2326 perfcontrol_callout_type_t type = (perfcontrol_callout_type_t)arg2;
2327 return sysctl_io_number(req, (int)perfcontrol_callout_stat_avg(type, stat),
0a7de745 2328 sizeof(int), NULL, NULL);
5ba3f43e
A
2329}
2330
2331SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_instr,
0a7de745
A
2332 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2333 (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_ON_CORE,
2334 sysctl_perfcontrol_callout_stat, "I", "");
5ba3f43e 2335SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_cycles,
0a7de745
A
2336 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2337 (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_ON_CORE,
2338 sysctl_perfcontrol_callout_stat, "I", "");
5ba3f43e 2339SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_instr,
0a7de745
A
2340 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2341 (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_OFF_CORE,
2342 sysctl_perfcontrol_callout_stat, "I", "");
5ba3f43e 2343SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_cycles,
0a7de745
A
2344 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2345 (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_OFF_CORE,
2346 sysctl_perfcontrol_callout_stat, "I", "");
5ba3f43e 2347SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_instr,
0a7de745
A
2348 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2349 (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_CONTEXT,
2350 sysctl_perfcontrol_callout_stat, "I", "");
5ba3f43e 2351SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_cycles,
0a7de745
A
2352 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2353 (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_CONTEXT,
2354 sysctl_perfcontrol_callout_stat, "I", "");
5ba3f43e 2355SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_instr,
0a7de745
A
2356 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2357 (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_STATE_UPDATE,
2358 sysctl_perfcontrol_callout_stat, "I", "");
5ba3f43e 2359SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_cycles,
0a7de745
A
2360 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2361 (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_STATE_UPDATE,
2362 sysctl_perfcontrol_callout_stat, "I", "");
5ba3f43e 2363
c6bf4f31
A
2364#if __AMP__
2365extern int sched_amp_idle_steal;
2366SYSCTL_INT(_kern, OID_AUTO, sched_amp_idle_steal,
2367 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2368 &sched_amp_idle_steal, 0, "");
2369extern int sched_amp_spill_steal;
2370SYSCTL_INT(_kern, OID_AUTO, sched_amp_spill_steal,
2371 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2372 &sched_amp_spill_steal, 0, "");
2373extern int sched_amp_spill_count;
2374SYSCTL_INT(_kern, OID_AUTO, sched_amp_spill_count,
2375 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2376 &sched_amp_spill_count, 0, "");
2377extern int sched_amp_spill_deferred_ipi;
2378SYSCTL_INT(_kern, OID_AUTO, sched_amp_spill_deferred_ipi,
2379 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2380 &sched_amp_spill_deferred_ipi, 0, "");
2381extern int sched_amp_pcores_preempt_immediate_ipi;
2382SYSCTL_INT(_kern, OID_AUTO, sched_amp_pcores_preempt_immediate_ipi,
2383 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2384 &sched_amp_pcores_preempt_immediate_ipi, 0, "");
2385#endif /* __AMP__ */
5ba3f43e 2386#endif /* __arm__ || __arm64__ */
cb323159
A
2387
2388#if __arm64__
2389extern int legacy_footprint_entitlement_mode;
2390SYSCTL_INT(_kern, OID_AUTO, legacy_footprint_entitlement_mode,
2391 CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
2392 &legacy_footprint_entitlement_mode, 0, "");
2393#endif /* __arm64__ */
2394
f427ee49
A
2395static int
2396sysctl_kern_sched_rt_n_backup_processors(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2397{
2398 int new_value, changed;
2399 int old_value = sched_get_rt_n_backup_processors();
2400 int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2401 if (changed) {
2402 sched_set_rt_n_backup_processors(new_value);
2403 }
2404
2405 return error;
2406}
2407
2408SYSCTL_PROC(_kern, OID_AUTO, sched_rt_n_backup_processors,
2409 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2410 0, 0, sysctl_kern_sched_rt_n_backup_processors, "I", "");
2411
5ba3f43e 2412#endif /* (DEVELOPMENT || DEBUG) */
fe8ab488 2413
6d2010ae 2414STATIC int
2d21ac55
A
2415sysctl_securelvl
2416(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2417{
2418 int new_value, changed;
2419 int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
2420 if (changed) {
2421 if (!(new_value < securelevel && req->p->p_pid != 1)) {
2422 proc_list_lock();
2423 securelevel = new_value;
2424 proc_list_unlock();
2425 } else {
2426 error = EPERM;
e5568f75 2427 }
2d21ac55 2428 }
0a7de745 2429 return error;
2d21ac55
A
2430}
2431
2432SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
0a7de745
A
2433 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2434 0, 0, sysctl_securelvl, "I", "");
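/*
 * Usage sketch (editor's addition, not part of xnu): per the check above, a
 * privileged process may raise kern.securelevel, but only launchd (pid 1)
 * may lower it; everyone else gets EPERM.
 */
#include <sys/sysctl.h>

static int
raise_securelevel(int level)
{
	return sysctlbyname("kern.securelevel", NULL, NULL, &level, sizeof(level));
}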
2d21ac55
A
2435
2436
6d2010ae 2437STATIC int
2d21ac55
A
2438sysctl_domainname
2439(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2440{
2441 int error, changed;
cb323159
A
2442 char tmpname[MAXHOSTNAMELEN] = {};
2443
2444 lck_mtx_lock(&domainname_lock);
2445 strlcpy(tmpname, domainname, sizeof(tmpname));
2446 lck_mtx_unlock(&domainname_lock);
2447
2448 error = sysctl_io_string(req, tmpname, sizeof(tmpname), 0, &changed);
2449 if (!error && changed) {
2450 lck_mtx_lock(&domainname_lock);
2451 strlcpy(domainname, tmpname, sizeof(domainname));
2452 lck_mtx_unlock(&domainname_lock);
2d21ac55 2453 }
0a7de745 2454 return error;
2d21ac55
A
2455}
2456
2457SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
0a7de745
A
2458 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
2459 0, 0, sysctl_domainname, "A", "");
2d21ac55 2460
0a7de745
A
2461SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
2462 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2463 &hostid, 0, "");
2d21ac55 2464
6d2010ae 2465STATIC int
2d21ac55
A
2466sysctl_hostname
2467(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2468{
2469 int error, changed;
cb323159
A
2470 char tmpname[MAXHOSTNAMELEN] = {};
2471
2472 lck_mtx_lock(&hostname_lock);
2473 strlcpy(tmpname, hostname, sizeof(tmpname));
2474 lck_mtx_unlock(&hostname_lock);
2475
2476 error = sysctl_io_string(req, tmpname, sizeof(tmpname), 1, &changed);
2477 if (!error && changed) {
2478 lck_mtx_lock(&hostname_lock);
2479 strlcpy(hostname, tmpname, sizeof(hostname));
2480 lck_mtx_unlock(&hostname_lock);
2d21ac55 2481 }
0a7de745 2482 return error;
2d21ac55
A
2483}
2484
2d21ac55 2485SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
0a7de745
A
2486 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
2487 0, 0, sysctl_hostname, "A", "");
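/*
 * Usage sketch (editor's addition, not part of xnu): kern.hostname is the
 * same state gethostname()/sethostname() operate on; the handler above stages
 * the copy through a stack buffer under hostname_lock.
 */
#include <sys/sysctl.h>
#include <string.h>

static int
set_hostname_sysctl(const char *name)
{
	return sysctlbyname("kern.hostname", NULL, NULL, (void *)name, strlen(name) + 1);
}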
2d21ac55 2488
6d2010ae 2489STATIC int
2d21ac55
A
2490sysctl_procname
2491(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2492{
2493 /* Original code allowed writing, I'm copying this, although this all makes
0a7de745
A
2494 * no sense to me. Besides, this sysctl is never used. */
2495 return sysctl_io_string(req, &req->p->p_name[0], (2 * MAXCOMLEN + 1), 1, NULL);
2d21ac55
A
2496}
2497
2498SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
0a7de745
A
2499 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2500 0, 0, sysctl_procname, "A", "");
2d21ac55 2501
0a7de745
A
2502SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
2503 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2504 &speculative_reads_disabled, 0, "");
2d21ac55 2505
0a7de745
A
2506SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
2507 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2508 &preheat_max_bytes, 0, "");
b0d623f7 2509
0a7de745
A
2510SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
2511 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2512 &preheat_min_bytes, 0, "");
b0d623f7 2513
0a7de745
A
2514SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
2515 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2516 &speculative_prefetch_max, 0, "");
b0d623f7 2517
0a7de745
A
2518SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
2519 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2520 &speculative_prefetch_max_iosize, 0, "");
316670eb 2521
6d2010ae 2522SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
0a7de745
A
2523 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2524 &vm_page_free_target, 0, "");
6d2010ae
A
2525
2526SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
0a7de745
A
2527 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2528 &vm_page_free_min, 0, "");
6d2010ae
A
2529
2530SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
0a7de745
A
2531 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2532 &vm_page_free_reserved, 0, "");
6d2010ae
A
2533
2534SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
0a7de745
A
2535 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2536 &vm_pageout_state.vm_page_speculative_percentage, 0, "");
6d2010ae
A
2537
2538SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
0a7de745
A
2539 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2540 &vm_pageout_state.vm_page_speculative_q_age_ms, 0, "");
6d2010ae
A
2541
2542SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
0a7de745
A
2543 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2544 &vm_max_delayed_work_limit, 0, "");
6d2010ae
A
2545
2546SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
0a7de745
A
2547 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2548 &vm_max_batch, 0, "");
6d2010ae 2549
39236c6e 2550SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
0a7de745
A
2551 CTLFLAG_RD | CTLFLAG_LOCKED,
2552 &bootsessionuuid_string, sizeof(bootsessionuuid_string), "");
6d2010ae 2553
f427ee49 2554
6d2010ae 2555STATIC int
2d21ac55
A
2556sysctl_boottime
2557(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2558{
39037602
A
2559 struct timeval tv;
2560 boottime_timeval(&tv);
b0d623f7 2561 struct proc *p = req->p;
2d21ac55 2562
b0d623f7 2563 if (proc_is64bit(p)) {
5ba3f43e 2564 struct user64_timeval t = {};
39037602
A
2565 t.tv_sec = tv.tv_sec;
2566 t.tv_usec = tv.tv_usec;
b0d623f7
A
2567 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2568 } else {
5ba3f43e 2569 struct user32_timeval t = {};
f427ee49 2570 t.tv_sec = (user32_time_t)tv.tv_sec;
39037602 2571 t.tv_usec = tv.tv_usec;
b0d623f7
A
2572 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2573 }
2d21ac55
A
2574}
2575
2576SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
0a7de745
A
2577 CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
2578 0, 0, sysctl_boottime, "S,timeval", "");
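/*
 * Usage sketch (editor's addition, not part of xnu): kern.boottime is read as
 * a struct timeval sized for the caller (user32/user64 above); uptime is then
 * just the difference from the current time.
 */
#include <sys/sysctl.h>
#include <sys/time.h>
#include <stdio.h>
#include <time.h>

static void
print_uptime(void)
{
	struct timeval boottime;
	size_t len = sizeof(boottime);
	int mib[2] = { CTL_KERN, KERN_BOOTTIME };

	if (sysctl(mib, 2, &boottime, &len, NULL, 0) == 0) {
		printf("up %ld seconds\n", (long)(time(NULL) - boottime.tv_sec));
	}
}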
2d21ac55 2579
f427ee49
A
2580extern const char* IOGetBootUUID(void);
2581
2582/* non-static: written by imageboot.c */
2583uuid_string_t fake_bootuuid;
2584
2585STATIC int
2586sysctl_bootuuid
2587(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2588{
2589 int error = ENOENT;
2590
2591 /* Check the first byte to see if the string has been
2592 * populated. This is a uuid_STRING_t; the check would
2593 * not work with a uuid_t.
2594 */
2595 if (fake_bootuuid[0] != '\0') {
2596 error = sysctl_io_string(req, fake_bootuuid, 0, 0, NULL);
2597 goto out;
2598 }
2599
2600 const char *uuid_string = IOGetBootUUID();
2601 if (uuid_string) {
2602 uuid_t boot_uuid;
2603 error = uuid_parse(uuid_string, boot_uuid);
2604 if (!error) {
2605 error = sysctl_io_string(req, __DECONST(char *, uuid_string), 0, 0, NULL);
2606 }
2607 }
2608
2609out:
2610 return error;
2611}
2612
2613SYSCTL_PROC(_kern, OID_AUTO, bootuuid,
2614 CTLTYPE_STRING | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
2615 0, 0, sysctl_bootuuid, "A", "");
2616
2617
2618extern const char* IOGetApfsPrebootUUID(void);
2619extern const char *IOGetAssociatedApfsVolgroupUUID(void);
2620
2621STATIC int
2622sysctl_apfsprebootuuid
2623(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2624{
2625 int error = ENOENT;
2626
2627 const char *uuid_string = IOGetApfsPrebootUUID();
2628 if (uuid_string) {
2629 uuid_t apfs_preboot_uuid;
2630 error = uuid_parse(uuid_string, apfs_preboot_uuid);
2631 if (!error) {
2632 error = sysctl_io_string(req, __DECONST(char *, uuid_string), 0, 0, NULL);
2633 }
2634 }
2635
2636 return error;
2637}
2638
2639SYSCTL_PROC(_kern, OID_AUTO, apfsprebootuuid,
2640 CTLTYPE_STRING | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
2641 0, 0, sysctl_apfsprebootuuid, "A", "");
2642
2643STATIC int
2644sysctl_targetsystemvolgroupuuid
2645(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2646{
2647 int error = ENOENT;
2648
2649 const char *uuid_string = IOGetApfsPrebootUUID();
2650 if (uuid_string) {
2651 uuid_t apfs_preboot_uuid;
2652 error = uuid_parse(uuid_string, apfs_preboot_uuid);
2653 if (!error) {
2654 error = sysctl_io_string(req, __DECONST(char *, uuid_string), 0, 0, NULL);
2655 }
2656 } else {
2657 /*
2658 * In special boot modes, such as kcgen-mode, the
2659 * apfs-preboot-uuid property will not be set. Instead, a
2660 * different property, associated-volume-group, will be set
2661 * which indicates the UUID of the VolumeGroup containing the
2662 * system volume into which you will boot.
2663 */
2664 uuid_string = IOGetAssociatedApfsVolgroupUUID();
2665 if (uuid_string) {
2666 uuid_t apfs_preboot_uuid;
2667 error = uuid_parse(uuid_string, apfs_preboot_uuid);
2668 if (!error) {
2669 error = sysctl_io_string(req, __DECONST(char *, uuid_string), 0, 0, NULL);
2670 }
2671 }
2672 }
2673
2674 return error;
2675}
2676
2677SYSCTL_PROC(_kern, OID_AUTO, targetsystemvolgroupuuid,
2678 CTLTYPE_STRING | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
2679 0, 0, sysctl_targetsystemvolgroupuuid, "A", "");
2680
6d2010ae 2681STATIC int
2d21ac55
A
2682sysctl_symfile
2683(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2684{
2685 char *str;
2686 int error = get_kernel_symfile(req->p, &str);
0a7de745
A
2687 if (error) {
2688 return error;
2689 }
2d21ac55
A
2690 return sysctl_io_string(req, str, 0, 0, NULL);
2691}
2692
2693
2694SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
0a7de745
A
2695 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
2696 0, 0, sysctl_symfile, "A", "");
2d21ac55 2697
ea3f0419 2698#if CONFIG_NFS_CLIENT
6d2010ae 2699STATIC int
2d21ac55
A
2700sysctl_netboot
2701(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2702{
2703 return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
2704}
2705
2706SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
0a7de745
A
2707 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2708 0, 0, sysctl_netboot, "I", "");
2d21ac55
A
2709#endif
2710
b7266188 2711#ifdef CONFIG_IMGSRC_ACCESS
6d2010ae
A
2712/*
2713 * Legacy--act as if only one layer of nesting is possible.
2714 */
2715STATIC int
0a7de745 2716sysctl_imgsrcdev
b7266188
A
2717(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2718{
2719 vfs_context_t ctx = vfs_context_current();
2720 vnode_t devvp;
2721 int result;
2722
2723 if (!vfs_context_issuser(ctx)) {
2724 return EPERM;
0a7de745 2725 }
b7266188 2726
6d2010ae 2727 if (imgsrc_rootvnodes[0] == NULL) {
b7266188 2728 return ENOENT;
0a7de745 2729 }
b7266188 2730
6d2010ae 2731 result = vnode_getwithref(imgsrc_rootvnodes[0]);
b7266188
A
2732 if (result != 0) {
2733 return result;
2734 }
0a7de745 2735
6d2010ae 2736 devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
b7266188
A
2737 result = vnode_getwithref(devvp);
2738 if (result != 0) {
2739 goto out;
2740 }
2741
2742 result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);
2743
2744 vnode_put(devvp);
2745out:
6d2010ae 2746 vnode_put(imgsrc_rootvnodes[0]);
b7266188
A
2747 return result;
2748}
2749
2750SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
0a7de745
A
2751 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2752 0, 0, sysctl_imgsrcdev, "I", "");
6d2010ae
A
2753
2754STATIC int
2755sysctl_imgsrcinfo
2756(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2757{
2758 int error;
0a7de745 2759 struct imgsrc_info info[MAX_IMAGEBOOT_NESTING] = {}; /* 2 for now, no problem */
6d2010ae
A
2760 uint32_t i;
2761 vnode_t rvp, devvp;
2762
2763 if (imgsrc_rootvnodes[0] == NULLVP) {
2764 return ENXIO;
2765 }
2766
2767 for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
2768 /*
2769 * Go get the root vnode.
2770 */
2771 rvp = imgsrc_rootvnodes[i];
2772 if (rvp == NULLVP) {
2773 break;
2774 }
2775
2776 error = vnode_get(rvp);
2777 if (error != 0) {
2778 return error;
2779 }
2780
0a7de745 2781 /*
6d2010ae
A
2782 * For now, no getting at a non-local volume.
2783 */
2784 devvp = vnode_mount(rvp)->mnt_devvp;
2785 if (devvp == NULL) {
2786 vnode_put(rvp);
0a7de745 2787 return EINVAL;
6d2010ae
A
2788 }
2789
2790 error = vnode_getwithref(devvp);
2791 if (error != 0) {
2792 vnode_put(rvp);
2793 return error;
2794 }
2795
2796 /*
2797 * Fill in info.
2798 */
2799 info[i].ii_dev = vnode_specrdev(devvp);
2800 info[i].ii_flags = 0;
2801 info[i].ii_height = i;
2802 bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));
2803
2804 vnode_put(devvp);
2805 vnode_put(rvp);
2806 }
2807
2808 return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
2809}
2810
2811SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
0a7de745
A
2812 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
2813 0, 0, sysctl_imgsrcinfo, "I", "");
6d2010ae 2814
b7266188
A
2815#endif /* CONFIG_IMGSRC_ACCESS */
2816
39236c6e
A
2817
2818SYSCTL_DECL(_kern_timer);
2819SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");
2820
fe8ab488 2821
0a7de745
A
2822SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
2823 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2824 &mach_timer_coalescing_enabled, 0, "");
6d2010ae 2825
39236c6e 2826SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
0a7de745
A
2827 CTLFLAG_RW | CTLFLAG_LOCKED,
2828 &timer_deadline_tracking_bin_1, "");
39236c6e 2829SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
0a7de745
A
2830 CTLFLAG_RW | CTLFLAG_LOCKED,
2831 &timer_deadline_tracking_bin_2, "");
39236c6e
A
2832
2833SYSCTL_DECL(_kern_timer_longterm);
2834SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");
2835
fe8ab488 2836
39236c6e
A
2837/* Must match definition in osfmk/kern/timer_call.c */
2838enum {
2839 THRESHOLD, QCOUNT,
2840 ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
a39ff7e2 2841 LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, SCAN_INTERVAL, PAUSES
39236c6e 2842};
0a7de745 2843extern uint64_t timer_sysctl_get(int);
39236c6e
A
2844extern int timer_sysctl_set(int, uint64_t);
2845
2846STATIC int
2847sysctl_timer
2848(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2849{
0a7de745
A
2850 int oid = (int)arg1;
2851 uint64_t value = timer_sysctl_get(oid);
2852 uint64_t new_value;
2853 int error;
2854 int changed;
39236c6e
A
2855
2856 error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
0a7de745 2857 if (changed) {
39236c6e 2858 error = timer_sysctl_set(oid, new_value);
0a7de745 2859 }
39236c6e
A
2860
2861 return error;
2862}
2863
2864SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
0a7de745
A
2865 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2866 (void *) THRESHOLD, 0, sysctl_timer, "Q", "");
5ba3f43e 2867SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_limit,
0a7de745
A
2868 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2869 (void *) SCAN_LIMIT, 0, sysctl_timer, "Q", "");
a39ff7e2 2870SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_interval,
0a7de745
A
2871 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2872 (void *) SCAN_INTERVAL, 0, sysctl_timer, "Q", "");
a39ff7e2 2873
39236c6e 2874SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
0a7de745
A
2875 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2876 (void *) QCOUNT, 0, sysctl_timer, "Q", "");
a39ff7e2 2877SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_pauses,
0a7de745
A
2878 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2879 (void *) PAUSES, 0, sysctl_timer, "Q", "");
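/*
 * Usage sketch (editor's addition, not part of xnu): the longterm timer
 * controls above are plain 64-bit values; reading the enqueue threshold from
 * userspace looks like this.
 */
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

static void
print_longterm_threshold(void)
{
	uint64_t threshold = 0;
	size_t len = sizeof(threshold);

	if (sysctlbyname("kern.timer.longterm.threshold", &threshold, &len, NULL, 0) == 0) {
		printf("longterm threshold: %llu\n", (unsigned long long)threshold);
	}
}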
a39ff7e2 2880
5ba3f43e 2881#if DEBUG
39236c6e 2882SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
0a7de745
A
2883 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2884 (void *) ENQUEUES, 0, sysctl_timer, "Q", "");
39236c6e 2885SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
0a7de745
A
2886 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2887 (void *) DEQUEUES, 0, sysctl_timer, "Q", "");
39236c6e 2888SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
0a7de745
A
2889 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2890 (void *) ESCALATES, 0, sysctl_timer, "Q", "");
39236c6e 2891SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
0a7de745
A
2892 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2893 (void *) SCANS, 0, sysctl_timer, "Q", "");
39236c6e 2894SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
0a7de745
A
2895 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2896 (void *) PREEMPTS, 0, sysctl_timer, "Q", "");
39236c6e 2897SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
0a7de745
A
2898 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2899 (void *) LATENCY, 0, sysctl_timer, "Q", "");
39236c6e 2900SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
0a7de745
A
2901 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2902 (void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
39236c6e 2903SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
0a7de745
A
2904 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2905 (void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
39236c6e
A
2906#endif /* DEBUG */
2907
6d2010ae 2908STATIC int
2d21ac55
A
2909sysctl_usrstack
2910(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2911{
2912 return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
2913}
2914
b0d623f7 2915SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
0a7de745
A
2916 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2917 0, 0, sysctl_usrstack, "I", "");
2d21ac55 2918
6d2010ae 2919STATIC int
2d21ac55
A
2920sysctl_usrstack64
2921(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2922{
2923 return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
2924}
2925
2926SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
0a7de745
A
2927 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2928 0, 0, sysctl_usrstack64, "Q", "");
2d21ac55 2929
39037602
A
2930#if CONFIG_COREDUMP
2931
0a7de745
A
2932SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
2933 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2934 corefilename, sizeof(corefilename), "");
2d21ac55 2935
6d2010ae 2936STATIC int
2d21ac55
A
2937sysctl_coredump
2938(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2939{
593a1d5f 2940#ifdef SECURE_KERNEL
fe8ab488 2941 (void)req;
0a7de745 2942 return ENOTSUP;
fe8ab488 2943#else
2d21ac55
A
2944 int new_value, changed;
2945 int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
2946 if (changed) {
0a7de745 2947 if ((new_value == 0) || (new_value == 1)) {
2d21ac55 2948 do_coredump = new_value;
0a7de745 2949 } else {
2d21ac55 2950 error = EINVAL;
0a7de745 2951 }
2d21ac55 2952 }
0a7de745 2953 return error;
fe8ab488 2954#endif
2d21ac55
A
2955}
2956
2957SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
0a7de745
A
2958 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2959 0, 0, sysctl_coredump, "I", "");
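/*
 * Usage sketch (editor's addition, not part of xnu): kern.coredump only
 * accepts 0 or 1; the core file name pattern itself lives in the
 * kern.corefile string above.
 */
#include <sys/sysctl.h>

static int
enable_coredumps(void)
{
	int on = 1;
	return sysctlbyname("kern.coredump", NULL, NULL, &on, sizeof(on));
}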
2d21ac55 2960
6d2010ae 2961STATIC int
2d21ac55
A
2962sysctl_suid_coredump
2963(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2964{
593a1d5f 2965#ifdef SECURE_KERNEL
fe8ab488 2966 (void)req;
0a7de745 2967 return ENOTSUP;
fe8ab488 2968#else
2d21ac55
A
2969 int new_value, changed;
2970 int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
2971 if (changed) {
0a7de745 2972 if ((new_value == 0) || (new_value == 1)) {
2d21ac55 2973 sugid_coredump = new_value;
0a7de745 2974 } else {
55e303ae 2975 error = EINVAL;
0a7de745 2976 }
55e303ae 2977 }
0a7de745 2978 return error;
fe8ab488 2979#endif
2d21ac55
A
2980}
2981
2982SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
0a7de745
A
2983 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2984 0, 0, sysctl_suid_coredump, "I", "");
2d21ac55 2985
39037602
A
2986#endif /* CONFIG_COREDUMP */
2987
6d2010ae 2988STATIC int
2d21ac55
A
2989sysctl_delayterm
2990(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2991{
2992 struct proc *p = req->p;
2993 int new_value, changed;
2994 int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
2995 if (changed) {
2996 proc_lock(p);
0a7de745 2997 if (new_value) {
2d21ac55 2998 req->p->p_lflag |= P_LDELAYTERM;
0a7de745 2999 } else {
2d21ac55 3000 req->p->p_lflag &= ~P_LDELAYTERM;
0a7de745 3001 }
2d21ac55
A
3002 proc_unlock(p);
3003 }
0a7de745 3004 return error;
2d21ac55
A
3005}
3006
3007SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
0a7de745
A
3008 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
3009 0, 0, sysctl_delayterm, "I", "");
2d21ac55 3010
55e303ae 3011
6d2010ae 3012STATIC int
2d21ac55
A
3013sysctl_rage_vnode
3014(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
55e303ae 3015{
2d21ac55 3016 struct proc *p = req->p;
0a7de745 3017 struct uthread *ut;
2d21ac55
A
3018 int new_value, old_value, changed;
3019 int error;
55e303ae 3020
2d21ac55
A
3021 ut = get_bsdthread_info(current_thread());
3022
0a7de745
A
3023 if (ut->uu_flag & UT_RAGE_VNODES) {
3024 old_value = KERN_RAGE_THREAD;
3025 } else if (p->p_lflag & P_LRAGE_VNODES) {
3026 old_value = KERN_RAGE_PROC;
3027 } else {
3028 old_value = 0;
3029 }
2d21ac55
A
3030
3031 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
3032
cb323159 3033 if ((error == 0) && (changed != 0)) {
0a7de745 3034 switch (new_value) {
2d21ac55 3035 case KERN_RAGE_PROC:
0a7de745 3036 proc_lock(p);
2d21ac55
A
3037 p->p_lflag |= P_LRAGE_VNODES;
3038 proc_unlock(p);
3039 break;
3040 case KERN_UNRAGE_PROC:
0a7de745 3041 proc_lock(p);
2d21ac55
A
3042 p->p_lflag &= ~P_LRAGE_VNODES;
3043 proc_unlock(p);
3044 break;
3045
3046 case KERN_RAGE_THREAD:
3047 ut->uu_flag |= UT_RAGE_VNODES;
3048 break;
3049 case KERN_UNRAGE_THREAD:
0a7de745 3050 ut = get_bsdthread_info(current_thread());
2d21ac55
A
3051 ut->uu_flag &= ~UT_RAGE_VNODES;
3052 break;
e5568f75 3053 }
2d21ac55 3054 }
0a7de745 3055 return error;
2d21ac55
A
3056}
3057
3058SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
0a7de745
A
3059 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
3060 0, 0, sysctl_rage_vnode, "I", "");
2d21ac55 3061
cb323159
A
3062/* XXX until filecoordinationd fixes a bit of inverted logic. */
3063STATIC int
3064sysctl_vfsnspace
3065(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3066{
3067 int old_value = 0, new_value, changed;
3068
3069 return sysctl_io_number(req, old_value, sizeof(int), &new_value,
3070 &changed);
3071}
3072
3073SYSCTL_PROC(_kern, OID_AUTO, vfsnspace,
3074 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
3075 0, 0, sysctl_vfsnspace, "I", "");
3076
316670eb
A
3077/* XXX move this interface into libproc and remove this sysctl */
3078STATIC int
3079sysctl_setthread_cpupercent
3080(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3081{
3082 int new_value, old_value;
3083 int error = 0;
3084 kern_return_t kret = KERN_SUCCESS;
3085 uint8_t percent = 0;
3086 int ms_refill = 0;
3087
0a7de745
A
3088 if (!req->newptr) {
3089 return 0;
3090 }
39236c6e 3091
316670eb
A
3092 old_value = 0;
3093
0a7de745
A
3094 if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0) {
3095 return error;
3096 }
316670eb 3097
0a7de745
A
3098 percent = new_value & 0xff; /* low 8 bits for percent */
3099 ms_refill = (new_value >> 8) & 0xffffff; /* upper 24 bits represent the ms refill value */
3100 if (percent > 100) {
3101 return EINVAL;
3102 }
316670eb
A
3103
3104 /*
3105 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
3106 */
0a7de745
A
3107 if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0) {
3108 return EIO;
3109 }
3110
3111 return 0;
316670eb
A
3112}
3113
3114SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
0a7de745
A
3115 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
3116 0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
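/*
 * Usage sketch (illustrative only, not part of the kernel build): the handler
 * above packs two fields into the single int written by the caller, with bits
 * 0-7 carrying the CPU percentage and bits 8-31 carrying the refill period in
 * milliseconds, and applies them to the calling thread via
 * thread_set_cpulimit().  The helper name below is hypothetical; the sketch
 * assumes sysctlbyname(3).
 */
#if 0 /* illustrative userspace sketch */
#include <sys/sysctl.h>

/* Limit the calling thread to 'percent' of a CPU over each 'refill_ms' window. */
static int
set_thread_cpu_limit(int percent, int refill_ms)
{
	/* low 8 bits: percent; upper 24 bits: refill period in milliseconds */
	int packed = (percent & 0xff) | ((refill_ms & 0xffffff) << 8);

	return sysctlbyname("kern.setthread_cpupercent", NULL, NULL,
	           &packed, sizeof(packed));
}
#endif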
316670eb 3117
2d21ac55 3118
6d2010ae 3119STATIC int
2d21ac55
A
3120sysctl_kern_check_openevt
3121(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3122{
3123 struct proc *p = req->p;
3124 int new_value, old_value, changed;
3125 int error;
3126
3127 if (p->p_flag & P_CHECKOPENEVT) {
3128 old_value = KERN_OPENEVT_PROC;
3129 } else {
0a7de745 3130 old_value = 0;
2d21ac55
A
3131 }
3132
3133 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
3134
cb323159 3135 if ((error == 0) && (changed != 0)) {
0a7de745 3136 switch (new_value) {
2d21ac55 3137 case KERN_OPENEVT_PROC:
b0d623f7 3138 OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
2d21ac55
A
3139 break;
3140
3141 case KERN_UNOPENEVT_PROC:
b0d623f7 3142 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
2d21ac55
A
3143 break;
3144
3145 default:
55e303ae 3146 error = EINVAL;
2d21ac55 3147 }
55e303ae 3148 }
0a7de745 3149 return error;
2d21ac55
A
3150}
3151
6d2010ae 3152SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
0a7de745 3153 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2d21ac55
A
3154
3155
0a7de745 3156#if DEVELOPMENT || DEBUG
6d2010ae 3157STATIC int
2d21ac55
A
3158sysctl_nx
3159(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3160{
4a3eedf9 3161#ifdef SECURE_KERNEL
fe8ab488 3162 (void)req;
4a3eedf9 3163 return ENOTSUP;
fe8ab488 3164#else
2d21ac55
A
3165 int new_value, changed;
3166 int error;
3167
3168 error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
0a7de745 3169 if (error) {
4a3eedf9 3170 return error;
0a7de745 3171 }
2d21ac55 3172
4a3eedf9 3173 if (changed) {
0a7de745 3174#if defined(__x86_64__)
2d21ac55
A
3175 /*
3176 * Only allow setting if NX is supported on the chip
3177 */
0a7de745 3178 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD)) {
4a3eedf9 3179 return ENOTSUP;
0a7de745 3180 }
2d21ac55 3181#endif
4a3eedf9
A
3182 nx_enabled = new_value;
3183 }
0a7de745 3184 return error;
fe8ab488 3185#endif /* SECURE_KERNEL */
2d21ac55 3186}
0a7de745 3187#endif
2d21ac55 3188
0a7de745
A
3189#if DEVELOPMENT || DEBUG
3190SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
3191 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
3192 0, 0, sysctl_nx, "I", "");
3193#endif
2d21ac55 3194
6d2010ae 3195STATIC int
2d21ac55
A
3196sysctl_loadavg
3197(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3198{
0a7de745
A
3199 if (proc_is64bit(req->p)) {
3200 struct user64_loadavg loadinfo64 = {};
3201 fill_loadavg64(&averunnable, &loadinfo64);
3202 return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
3203 } else {
3204 struct user32_loadavg loadinfo32 = {};
3205 fill_loadavg32(&averunnable, &loadinfo32);
3206 return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
3207 }
2d21ac55
A
3208}
3209
3210SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
0a7de745
A
3211 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
3212 0, 0, sysctl_loadavg, "S,loadavg", "");
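/*
 * Usage sketch (illustrative only, not part of the kernel build): the opaque
 * structure returned above carries the same load-average data that
 * getloadavg(3) reports.  The sketch assumes the CTL_VM / VM_LOADAVG MIB
 * names and struct loadavg from <sys/sysctl.h>; the helper name is
 * hypothetical.
 */
#if 0 /* illustrative userspace sketch */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

/* Read and print the 1/5/15 minute load averages. */
static void
print_loadavg(void)
{
	struct loadavg la;
	size_t len = sizeof(la);
	int mib[2] = { CTL_VM, VM_LOADAVG };

	if (sysctl(mib, 2, &la, &len, NULL, 0) == 0) {
		printf("load: %.2f %.2f %.2f\n",
		    (double)la.ldavg[0] / la.fscale,
		    (double)la.ldavg[1] / la.fscale,
		    (double)la.ldavg[2] / la.fscale);
	}
}
#endif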
2d21ac55 3213
6d2010ae
A
3214/*
3215 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
3216 */
3217STATIC int
3218sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
0a7de745 3219 __unused int arg2, struct sysctl_req *req)
6d2010ae 3220{
0a7de745
A
3221 int old_value = 0, new_value = 0, error = 0;
3222
3223 if (vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value )) {
3224 return error;
3225 }
6d2010ae
A
3226 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
3227 if (!error) {
0a7de745 3228 return vm_toggle_entry_reuse(new_value, NULL);
6d2010ae 3229 }
0a7de745 3230 return error;
6d2010ae
A
3231}
3232
0a7de745 3233SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse, "I", "");
6d2010ae 3234
d9a64523
A
3235#ifdef CONFIG_XNUPOST
3236
d9a64523 3237extern uint32_t xnupost_get_estimated_testdata_size(void);
d9a64523
A
3238extern int xnupost_reset_all_tests(void);
3239
3240STATIC int
3241sysctl_handle_xnupost_get_tests SYSCTL_HANDLER_ARGS
3242{
3243 /* fixup unused arguments warnings */
3244 __unused int _oa2 = arg2;
3245 __unused void * _oa1 = arg1;
3246 __unused struct sysctl_oid * _oidp = oidp;
3247
3248 int error = 0;
3249 user_addr_t oldp = 0;
3250 user_addr_t newp = 0;
3251 uint32_t usedbytes = 0;
3252
3253 oldp = req->oldptr;
3254 newp = req->newptr;
3255
0a7de745 3256 if (newp) {
d9a64523 3257 return ENOTSUP;
0a7de745 3258 }
d9a64523
A
3259
3260 if ((void *)oldp == NULL) {
3261 /* return estimated size for second call where info can be placed */
3262 req->oldidx = xnupost_get_estimated_testdata_size();
3263 } else {
3264 error = xnupost_export_testdata((void *)oldp, req->oldlen, &usedbytes);
3265 req->oldidx = usedbytes;
3266 }
3267
3268 return error;
3269}
3270
3271SYSCTL_PROC(_debug,
0a7de745
A
3272 OID_AUTO,
3273 xnupost_get_tests,
3274 CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
3275 0,
3276 0,
3277 sysctl_handle_xnupost_get_tests,
3278 "-",
3279 "read xnupost test data in kernel");
d9a64523
A
3280
3281STATIC int
3282sysctl_debug_xnupost_ctl SYSCTL_HANDLER_ARGS
3283{
3284 /* fixup unused arguments warnings */
3285 __unused int _oa2 = arg2;
3286 __unused void * _oa1 = arg1;
3287 __unused struct sysctl_oid * _oidp = oidp;
3288
3289#define ARRCOUNT 4
3290 /*
3291 * INPUT: ACTION, PARAM1, PARAM2, PARAM3
3292 * OUTPUT: RESULTCODE, ADDITIONAL DATA
3293 */
3294 int32_t outval[ARRCOUNT] = {0};
3295 int32_t input[ARRCOUNT] = {0};
3296 int32_t out_size = sizeof(outval);
3297 int32_t in_size = sizeof(input);
3298 int error = 0;
3299
3300 /* if this is a NULL call to find out the size, send back the size info */
3301 if (!req->newptr) {
3302 goto out;
3303 }
3304
3305 /* pull in provided value from userspace */
3306 error = SYSCTL_IN(req, &input[0], in_size);
0a7de745 3307 if (error) {
d9a64523 3308 return error;
0a7de745 3309 }
d9a64523
A
3310
3311 if (input[0] == XTCTL_RESET_TESTDATA) {
3312 outval[0] = xnupost_reset_all_tests();
3313 goto out;
3314 }
3315
3316out:
3317 error = SYSCTL_OUT(req, &outval[0], out_size);
3318 return error;
3319}
3320
3321SYSCTL_PROC(_debug,
0a7de745
A
3322 OID_AUTO,
3323 xnupost_testctl,
3324 CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
3325 0,
3326 0,
3327 sysctl_debug_xnupost_ctl,
3328 "I",
3329 "xnupost control for kernel testing");
d9a64523
A
3330
3331extern void test_oslog_handleOSLogCtl(int32_t * in, int32_t * out, int32_t arraycount);
3332
3333STATIC int
3334sysctl_debug_test_oslog_ctl(__unused struct sysctl_oid * oidp, __unused void * arg1, __unused int arg2, struct sysctl_req * req)
3335{
3336#define ARRCOUNT 4
3337 int32_t outval[ARRCOUNT] = {0};
3338 int32_t input[ARRCOUNT] = {0};
3339 int32_t size_outval = sizeof(outval);
3340 int32_t size_inval = sizeof(input);
3341 int32_t error;
3342
3343 /* if this is a NULL call to find out the size, send back the size info */
3344 if (!req->newptr) {
3345 error = SYSCTL_OUT(req, &outval[0], size_outval);
3346 return error;
3347 }
3348
3349 /* pull in provided value from userspace */
3350 error = SYSCTL_IN(req, &input[0], size_inval);
0a7de745 3351 if (error) {
d9a64523 3352 return error;
0a7de745 3353 }
d9a64523
A
3354
3355 test_oslog_handleOSLogCtl(input, outval, ARRCOUNT);
3356
3357 error = SYSCTL_OUT(req, &outval[0], size_outval);
3358
3359 return error;
3360}
3361
3362SYSCTL_PROC(_debug,
0a7de745
A
3363 OID_AUTO,
3364 test_OSLogCtl,
3365 CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
3366 0,
3367 0,
3368 sysctl_debug_test_oslog_ctl,
3369 "I",
3370 "testing oslog in kernel");
d9a64523
A
3371
3372#include <mach/task.h>
3373#include <mach/semaphore.h>
3374
c3c9b80d
A
3375static LCK_GRP_DECLARE(sysctl_debug_test_stackshot_owner_grp, "test-stackshot-owner-grp");
3376static LCK_MTX_DECLARE(sysctl_debug_test_stackshot_owner_init_mtx,
3377 &sysctl_debug_test_stackshot_owner_grp);
d9a64523
A
3378
3379/* This is a sysctl for testing collection of owner info on a lock in kernel space. A multi-threaded
3380 * test from userland sets this sysctl in such a way that a thread blocks in kernel mode, and a
3381 * stackshot is taken to see if the owner of the lock can be identified.
3382 *
3383 * We can't return to userland with a kernel lock held, so be sure to unlock before we leave.
3384 * The semaphores allow us to artificially create cases where the lock is being held and the
3385 * thread is hanging / taking a long time to do something. */
3386
3387volatile char sysctl_debug_test_stackshot_mtx_inited = 0;
3388semaphore_t sysctl_debug_test_stackshot_mutex_sem;
3389lck_mtx_t sysctl_debug_test_stackshot_owner_lck;
3390
3391#define SYSCTL_DEBUG_MTX_ACQUIRE_WAIT 1
3392#define SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT 2
3393#define SYSCTL_DEBUG_MTX_SIGNAL 3
3394#define SYSCTL_DEBUG_MTX_TEARDOWN 4
3395
3396STATIC int
3397sysctl_debug_test_stackshot_mutex_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3398{
3399 long long option = -1;
3400 /* if the user tries to read the sysctl, we tell them what the address of the lock is (to test against stackshot's output) */
3401 long long mtx_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_lck);
3402 int error = sysctl_io_number(req, mtx_unslid_addr, sizeof(long long), (void*)&option, NULL);
3403
c3c9b80d 3404 lck_mtx_lock(&sysctl_debug_test_stackshot_owner_init_mtx);
d9a64523
A
3405 if (!sysctl_debug_test_stackshot_mtx_inited) {
3406 lck_mtx_init(&sysctl_debug_test_stackshot_owner_lck,
c3c9b80d 3407 &sysctl_debug_test_stackshot_owner_grp,
0a7de745 3408 LCK_ATTR_NULL);
d9a64523 3409 semaphore_create(kernel_task,
0a7de745
A
3410 &sysctl_debug_test_stackshot_mutex_sem,
3411 SYNC_POLICY_FIFO, 0);
d9a64523
A
3412 sysctl_debug_test_stackshot_mtx_inited = 1;
3413 }
c3c9b80d 3414 lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_init_mtx);
d9a64523
A
3415
3416 if (!error) {
0a7de745
A
3417 switch (option) {
3418 case SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT:
3419 lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck);
3420 lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck);
3421 break;
3422 case SYSCTL_DEBUG_MTX_ACQUIRE_WAIT:
3423 lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck);
3424 semaphore_wait(sysctl_debug_test_stackshot_mutex_sem);
3425 lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck);
3426 break;
3427 case SYSCTL_DEBUG_MTX_SIGNAL:
3428 semaphore_signal(sysctl_debug_test_stackshot_mutex_sem);
3429 break;
3430 case SYSCTL_DEBUG_MTX_TEARDOWN:
c3c9b80d 3431 lck_mtx_lock(&sysctl_debug_test_stackshot_owner_init_mtx);
0a7de745
A
3432
3433 lck_mtx_destroy(&sysctl_debug_test_stackshot_owner_lck,
c3c9b80d 3434 &sysctl_debug_test_stackshot_owner_grp);
0a7de745
A
3435 semaphore_destroy(kernel_task,
3436 sysctl_debug_test_stackshot_mutex_sem);
3437 sysctl_debug_test_stackshot_mtx_inited = 0;
3438
c3c9b80d 3439 lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_init_mtx);
0a7de745
A
3440 break;
3441 case -1: /* user just wanted to read the value, so do nothing */
3442 break;
3443 default:
3444 error = EINVAL;
3445 break;
d9a64523
A
3446 }
3447 }
3448 return error;
3449}
3450
3451 /* We can't return to userland with a kernel rwlock held, so be sure to unlock before we leave.
3452 * The semaphores allow us to artificially create cases where the lock is being held and the
3453 * thread is hanging / taking a long time to do something. */
3454
3455SYSCTL_PROC(_debug,
0a7de745
A
3456 OID_AUTO,
3457 test_MutexOwnerCtl,
3458 CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3459 0,
3460 0,
3461 sysctl_debug_test_stackshot_mutex_owner,
3462 "-",
3463 "Testing mutex owner in kernel");
d9a64523
A
3464
3465volatile char sysctl_debug_test_stackshot_rwlck_inited = 0;
3466lck_rw_t sysctl_debug_test_stackshot_owner_rwlck;
3467semaphore_t sysctl_debug_test_stackshot_rwlck_sem;
3468
3469#define SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT 1
3470#define SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT 2
3471#define SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT 3
3472#define SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT 4
3473#define SYSCTL_DEBUG_KRWLCK_SIGNAL 5
3474#define SYSCTL_DEBUG_KRWLCK_TEARDOWN 6
3475
3476STATIC int
3477sysctl_debug_test_stackshot_rwlck_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3478{
3479 long long option = -1;
0a7de745 3480 /* if the user tries to read the sysctl, we tell them what the address of the lock is
d9a64523
A
3481 * (to test against stackshot's output) */
3482 long long rwlck_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_rwlck);
3483 int error = sysctl_io_number(req, rwlck_unslid_addr, sizeof(long long), (void*)&option, NULL);
3484
c3c9b80d 3485 lck_mtx_lock(&sysctl_debug_test_stackshot_owner_init_mtx);
d9a64523
A
3486 if (!sysctl_debug_test_stackshot_rwlck_inited) {
3487 lck_rw_init(&sysctl_debug_test_stackshot_owner_rwlck,
c3c9b80d 3488 &sysctl_debug_test_stackshot_owner_grp,
0a7de745 3489 LCK_ATTR_NULL);
d9a64523 3490 semaphore_create(kernel_task,
0a7de745
A
3491 &sysctl_debug_test_stackshot_rwlck_sem,
3492 SYNC_POLICY_FIFO,
3493 0);
d9a64523
A
3494 sysctl_debug_test_stackshot_rwlck_inited = 1;
3495 }
c3c9b80d 3496 lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_init_mtx);
d9a64523
A
3497
3498 if (!error) {
0a7de745
A
3499 switch (option) {
3500 case SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT:
3501 lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
3502 lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
3503 break;
3504 case SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT:
3505 lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
3506 semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem);
3507 lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
3508 break;
3509 case SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT:
3510 lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
3511 lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
3512 break;
3513 case SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT:
3514 lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
3515 semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem);
3516 lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
3517 break;
3518 case SYSCTL_DEBUG_KRWLCK_SIGNAL:
3519 semaphore_signal(sysctl_debug_test_stackshot_rwlck_sem);
3520 break;
3521 case SYSCTL_DEBUG_KRWLCK_TEARDOWN:
c3c9b80d 3522 lck_mtx_lock(&sysctl_debug_test_stackshot_owner_init_mtx);
0a7de745
A
3523
3524 lck_rw_destroy(&sysctl_debug_test_stackshot_owner_rwlck,
c3c9b80d 3525 &sysctl_debug_test_stackshot_owner_grp);
0a7de745
A
3526 semaphore_destroy(kernel_task,
3527 sysctl_debug_test_stackshot_rwlck_sem);
3528 sysctl_debug_test_stackshot_rwlck_inited = 0;
3529
c3c9b80d 3530 lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_init_mtx);
0a7de745
A
3531 break;
3532 case -1: /* user just wanted to read the value, so do nothing */
3533 break;
3534 default:
3535 error = EINVAL;
3536 break;
d9a64523
A
3537 }
3538 }
3539 return error;
3540}
3541
3542
3543SYSCTL_PROC(_debug,
0a7de745
A
3544 OID_AUTO,
3545 test_RWLockOwnerCtl,
3546 CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3547 0,
3548 0,
3549 sysctl_debug_test_stackshot_rwlck_owner,
3550 "-",
3551 "Testing rwlock owner in kernel");
d9a64523 3552#endif /* CONFIG_XNUPOST */
39037602 3553
6d2010ae 3554STATIC int
2d21ac55
A
3555sysctl_swapusage
3556(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3557{
0a7de745
A
3558 int error;
3559 uint64_t swap_total;
3560 uint64_t swap_avail;
3561 vm_size_t swap_pagesize;
3562 boolean_t swap_encrypted;
3563 struct xsw_usage xsu = {};
3564
3565 error = macx_swapinfo(&swap_total,
3566 &swap_avail,
3567 &swap_pagesize,
3568 &swap_encrypted);
3569 if (error) {
3570 return error;
3571 }
2d21ac55 3572
0a7de745
A
3573 xsu.xsu_total = swap_total;
3574 xsu.xsu_avail = swap_avail;
3575 xsu.xsu_used = swap_total - swap_avail;
f427ee49 3576 xsu.xsu_pagesize = (u_int32_t)MIN(swap_pagesize, UINT32_MAX);
0a7de745
A
3577 xsu.xsu_encrypted = swap_encrypted;
3578 return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
2d21ac55
A
3579}
3580
3581
3582
3583SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
0a7de745
A
3584 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
3585 0, 0, sysctl_swapusage, "S,xsw_usage", "");
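/*
 * Usage sketch (illustrative only, not part of the kernel build): the opaque
 * structure exported above is struct xsw_usage from <sys/sysctl.h>; all sizes
 * are in bytes.  The helper name is hypothetical; the sketch assumes
 * sysctlbyname(3).
 */
#if 0 /* illustrative userspace sketch */
#include <sys/sysctl.h>
#include <stdio.h>

/* Read and print the swap usage the handler above reports. */
static void
print_swapusage(void)
{
	struct xsw_usage xsu;
	size_t len = sizeof(xsu);

	if (sysctlbyname("vm.swapusage", &xsu, &len, NULL, 0) == 0) {
		printf("swap: total=%llu used=%llu free=%llu encrypted=%d\n",
		    (unsigned long long)xsu.xsu_total,
		    (unsigned long long)xsu.xsu_used,
		    (unsigned long long)xsu.xsu_avail,
		    (int)xsu.xsu_encrypted);
	}
}
#endif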
2d21ac55 3586
6d2010ae
A
3587#if CONFIG_FREEZE
3588extern void vm_page_reactivate_all_throttled(void);
d9a64523 3589extern void memorystatus_disable_freeze(void);
6d2010ae
A
3590
3591static int
3592sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
3593{
3594#pragma unused(arg1, arg2)
316670eb 3595 int error, val = memorystatus_freeze_enabled ? 1 : 0;
6d2010ae
A
3596 boolean_t disabled;
3597
3598 error = sysctl_handle_int(oidp, &val, 0, req);
0a7de745
A
3599 if (error || !req->newptr) {
3600 return error;
3601 }
39037602 3602
0a7de745 3603 if (!VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
39236c6e 3604 //assert(req->newptr);
fe8ab488 3605 printf("Failed attempt to set vm.freeze_enabled sysctl\n");
39236c6e
A
3606 return EINVAL;
3607 }
3608
0a7de745
A
3609 /*
3610 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
6d2010ae 3611 */
316670eb 3612 disabled = (!val && memorystatus_freeze_enabled);
0a7de745 3613
316670eb 3614 memorystatus_freeze_enabled = val ? TRUE : FALSE;
0a7de745 3615
6d2010ae
A
3616 if (disabled) {
3617 vm_page_reactivate_all_throttled();
d9a64523 3618 memorystatus_disable_freeze();
6d2010ae 3619 }
0a7de745
A
3620
3621 return 0;
6d2010ae
A
3622}
3623
0a7de745 3624SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
6d2010ae 3625#endif /* CONFIG_FREEZE */
2d21ac55 3626
d9a64523
A
3627#if DEVELOPMENT || DEBUG
3628extern int vm_num_swap_files_config;
3629extern int vm_num_swap_files;
3630extern lck_mtx_t vm_swap_data_lock;
0a7de745 3631#define VM_MAX_SWAP_FILE_NUM 100
d9a64523
A
3632
3633static int
3634sysctl_vm_config_num_swap_files SYSCTL_HANDLER_ARGS
3635{
3636#pragma unused(arg1, arg2)
3637 int error = 0, val = vm_num_swap_files_config;
3638
3639 error = sysctl_handle_int(oidp, &val, 0, req);
3640 if (error || !req->newptr) {
0a7de745 3641 goto out;
d9a64523
A
3642 }
3643
3644 if (!VM_CONFIG_SWAP_IS_ACTIVE && !VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
3645 printf("Swap is disabled\n");
3646 error = EINVAL;
3647 goto out;
3648 }
3649
3650 lck_mtx_lock(&vm_swap_data_lock);
3651
3652 if (val < vm_num_swap_files) {
3653 printf("Cannot configure fewer swap files than already exist.\n");
3654 error = EINVAL;
3655 lck_mtx_unlock(&vm_swap_data_lock);
3656 goto out;
3657 }
3658
3659 if (val > VM_MAX_SWAP_FILE_NUM) {
3660 printf("Capping number of swap files to upper bound.\n");
3661 val = VM_MAX_SWAP_FILE_NUM;
3662 }
3663
3664 vm_num_swap_files_config = val;
3665 lck_mtx_unlock(&vm_swap_data_lock);
3666out:
3667
0a7de745 3668 return error;
d9a64523
A
3669}
3670
3671SYSCTL_PROC(_debug, OID_AUTO, num_swap_files_configured, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_config_num_swap_files, "I", "");
3672#endif /* DEVELOPMENT || DEBUG */
3673
2d21ac55 3674/* this kernel does NOT implement shared_region_make_private_np() */
0a7de745
A
3675SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
3676 CTLFLAG_RD | CTLFLAG_LOCKED,
3677 (int *)NULL, 0, "");
0c530ab8 3678
6d2010ae 3679STATIC int
0c530ab8 3680fetch_process_cputype(
2d21ac55 3681 proc_t cur_proc,
0c530ab8
A
3682 int *name,
3683 u_int namelen,
3684 cpu_type_t *cputype)
3685{
2d21ac55
A
3686 proc_t p = PROC_NULL;
3687 int refheld = 0;
0c530ab8 3688 cpu_type_t ret = 0;
2d21ac55 3689 int error = 0;
0a7de745
A
3690
3691 if (namelen == 0) {
0c530ab8 3692 p = cur_proc;
0a7de745 3693 } else if (namelen == 1) {
2d21ac55 3694 p = proc_find(name[0]);
0a7de745
A
3695 if (p == NULL) {
3696 return EINVAL;
3697 }
2d21ac55 3698 refheld = 1;
0c530ab8 3699 } else {
2d21ac55
A
3700 error = EINVAL;
3701 goto out;
0c530ab8
A
3702 }
3703
fe8ab488 3704 ret = cpu_type() & ~CPU_ARCH_MASK;
d9a64523 3705 if (IS_64BIT_PROCESS(p)) {
fe8ab488 3706 ret |= CPU_ARCH_ABI64;
d9a64523 3707 }
fe8ab488 3708
0c530ab8 3709 *cputype = ret;
0a7de745
A
3710
3711 if (refheld != 0) {
2d21ac55 3712 proc_rele(p);
0a7de745 3713 }
2d21ac55 3714out:
0a7de745 3715 return error;
0c530ab8
A
3716}
3717
f427ee49 3718
6d2010ae 3719STATIC int
2d21ac55 3720sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
0a7de745 3721 struct sysctl_req *req)
0c530ab8
A
3722{
3723 int error;
3724 cpu_type_t proc_cputype = 0;
0a7de745 3725 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0) {
0c530ab8 3726 return error;
0a7de745 3727 }
0c530ab8 3728 int res = 1;
0a7de745 3729 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) {
0c530ab8 3730 res = 0;
0a7de745 3731 }
0c530ab8 3732 return SYSCTL_OUT(req, &res, sizeof(res));
0a7de745 3733}
f427ee49 3734SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native, "I", "proc_native");
0c530ab8 3735
6d2010ae 3736STATIC int
2d21ac55 3737sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
0a7de745 3738 struct sysctl_req *req)
0c530ab8
A
3739{
3740 int error;
3741 cpu_type_t proc_cputype = 0;
0a7de745 3742 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0) {
0c530ab8 3743 return error;
0a7de745 3744 }
0c530ab8
A
3745 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
3746}
0a7de745 3747SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype, "I", "proc_cputype");
0c530ab8 3748
6d2010ae 3749STATIC int
2d21ac55
A
3750sysctl_safeboot
3751(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3752{
3753 return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
3754}
3755
3756SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
0a7de745
A
3757 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
3758 0, 0, sysctl_safeboot, "I", "");
2d21ac55 3759
6d2010ae 3760STATIC int
2d21ac55
A
3761sysctl_singleuser
3762(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3763{
3764 return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
3765}
3766
3767SYSCTL_PROC(_kern, OID_AUTO, singleuser,
0a7de745
A
3768 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
3769 0, 0, sysctl_singleuser, "I", "");
2d21ac55 3770
0a7de745
A
3771STATIC int
3772sysctl_minimalboot
3e170ce0
A
3773(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3774{
3775 return sysctl_io_number(req, minimalboot, sizeof(int), NULL, NULL);
3776}
3777
3778SYSCTL_PROC(_kern, OID_AUTO, minimalboot,
0a7de745
A
3779 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
3780 0, 0, sysctl_minimalboot, "I", "");
3e170ce0 3781
2d21ac55
A
3782/*
3783 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
3784 */
0a7de745
A
3785extern boolean_t affinity_sets_enabled;
3786extern int affinity_sets_mapping;
2d21ac55 3787
0a7de745
A
3788SYSCTL_INT(_kern, OID_AUTO, affinity_sets_enabled,
3789 CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
3790SYSCTL_INT(_kern, OID_AUTO, affinity_sets_mapping,
3791 CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
2d21ac55 3792
316670eb
A
3793/*
3794 * Boolean indicating if KASLR is active.
3795 */
3796STATIC int
3797sysctl_slide
3798(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3799{
0a7de745 3800 uint32_t slide;
316670eb
A
3801
3802 slide = vm_kernel_slide ? 1 : 0;
3803
3804 return sysctl_io_number( req, slide, sizeof(int), NULL, NULL);
3805}
3806
3807SYSCTL_PROC(_kern, OID_AUTO, slide,
0a7de745
A
3808 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
3809 0, 0, sysctl_slide, "I", "");
316670eb 3810
f427ee49
A
3811/* User address of the PFZ */
3812#if DEBUG || DEVELOPMENT
3813extern user32_addr_t commpage_text32_location;
3814extern user64_addr_t commpage_text64_location;
3815
3816STATIC int
3817sysctl_pfz_start SYSCTL_HANDLER_ARGS
3818{
3819#pragma unused(oidp, arg1, arg2)
3820
3821#ifdef __LP64__
3822 return sysctl_io_number(req, commpage_text64_location, sizeof(user64_addr_t), NULL, NULL);
3823#else
3824 return sysctl_io_number(req, commpage_text32_location, sizeof(user32_addr_t), NULL, NULL);
3825#endif
3826}
3827
3828SYSCTL_PROC(_kern, OID_AUTO, pfz,
3829 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
3830 0, 0, sysctl_pfz_start, "I", "");
3831#endif
3832
3833
2d21ac55
A
3834/*
3835 * Limit on total memory users can wire.
3836 *
0a7de745 3837 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
2d21ac55 3838 *
4ba76501 3839 * vm_per_task_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
2d21ac55
A
3840 *
3841 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
3842 * kmem_init().
3843 *
3844 * All values are in bytes.
3845 */
3846
0a7de745 3847vm_map_size_t vm_global_user_wire_limit;
4ba76501 3848vm_map_size_t vm_per_task_user_wire_limit;
f427ee49 3849extern uint64_t max_mem_actual, max_mem;
2d21ac55 3850
f427ee49
A
3851uint64_t vm_add_wire_count_over_global_limit;
3852uint64_t vm_add_wire_count_over_user_limit;
4ba76501
A
3853/*
3854 * We used to have a global in the kernel called vm_global_no_user_wire_limit which was the inverse
3855 * of vm_global_user_wire_limit. But maintaining both of those is silly, and vm_global_user_wire_limit is the
3856 * real limit.
3857 * This function is for backwards compatibility with userspace
3858 * since we exposed the old global via a sysctl.
3859 */
3860STATIC int
3861sysctl_global_no_user_wire_amount(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3862{
3863 vm_map_size_t old_value;
3864 vm_map_size_t new_value;
3865 int changed;
3866 int error;
f427ee49
A
3867 uint64_t config_memsize = max_mem;
3868#if defined(XNU_TARGET_OS_OSX)
3869 config_memsize = max_mem_actual;
3870#endif /* defined(XNU_TARGET_OS_OSX) */
4ba76501 3871
f427ee49 3872 old_value = (vm_map_size_t)(config_memsize - vm_global_user_wire_limit);
4ba76501
A
3873 error = sysctl_io_number(req, old_value, sizeof(vm_map_size_t), &new_value, &changed);
3874 if (changed) {
f427ee49 3875 if ((uint64_t)new_value > config_memsize) {
4ba76501
A
3876 error = EINVAL;
3877 } else {
f427ee49 3878 vm_global_user_wire_limit = (vm_map_size_t)(config_memsize - new_value);
4ba76501
A
3879 }
3880 }
3881 return error;
3882}
b0d623f7
A
3883/*
3884 * There needs to be a more automatic/elegant way to do this
3885 */
5ba3f43e 3886#if defined(__ARM__)
5ba3f43e 3887SYSCTL_INT(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, 0, "");
4ba76501
A
3888SYSCTL_INT(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_per_task_user_wire_limit, 0, "");
3889SYSCTL_PROC(_vm, OID_AUTO, global_no_user_wire_amount, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, &sysctl_global_no_user_wire_amount, "I", "");
5ba3f43e 3890#else
6d2010ae 3891SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
4ba76501
A
3892SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_per_task_user_wire_limit, "");
3893SYSCTL_PROC(_vm, OID_AUTO, global_no_user_wire_amount, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, &sysctl_global_no_user_wire_amount, "Q", "");
5ba3f43e 3894#endif
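/*
 * Usage sketch (illustrative only, not part of the kernel build): on 64-bit
 * configurations the two OIDs above are published as quads, and the
 * compatibility handler keeps vm.global_no_user_wire_amount equal to the
 * configured memory size minus vm.global_user_wire_limit, so the pair always
 * sums to the memory size used in the calculation.  The helper name is
 * hypothetical; the sketch assumes sysctlbyname(3).
 */
#if 0 /* illustrative userspace sketch */
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

/* Read both wire-limit sysctls maintained above. */
static void
print_wire_limits(void)
{
	uint64_t limit = 0, not_wirable = 0;
	size_t len = sizeof(limit);

	sysctlbyname("vm.global_user_wire_limit", &limit, &len, NULL, 0);
	len = sizeof(not_wirable);
	sysctlbyname("vm.global_no_user_wire_amount", &not_wirable, &len, NULL, 0);
	printf("user-wirable: %llu bytes, not user-wirable: %llu bytes\n",
	    (unsigned long long)limit, (unsigned long long)not_wirable);
}
#endif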
b0d623f7 3895
f427ee49
A
3896/*
3897 * Relaxed atomic RW of a 64bit value via sysctl.
3898 */
3899STATIC int
3900sysctl_r_64bit_atomic(uint64_t *ptr, struct sysctl_req *req)
3901{
3902 uint64_t old_value;
3903 uint64_t new_value;
3904 int error;
3905
3906 old_value = os_atomic_load_wide(ptr, relaxed);
3907 error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL);
3908 return error;
3909}
3910STATIC int
3911sysctl_add_wire_count_over_global_limit(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3912{
3913 return sysctl_r_64bit_atomic(&vm_add_wire_count_over_global_limit, req);
3914}
3915STATIC int
3916sysctl_add_wire_count_over_user_limit(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3917{
3918 return sysctl_r_64bit_atomic(&vm_add_wire_count_over_user_limit, req);
3919}
3920
3921SYSCTL_PROC(_vm, OID_AUTO, add_wire_count_over_global_limit, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, &sysctl_add_wire_count_over_global_limit, "Q", "");
3922SYSCTL_PROC(_vm, OID_AUTO, add_wire_count_over_user_limit, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, &sysctl_add_wire_count_over_user_limit, "Q", "");
3923
4ba76501 3924#if DEVELOPMENT || DEBUG
f427ee49 3925/* These sysctls are used to test the wired limit. */
4ba76501
A
3926extern unsigned int vm_page_wire_count;
3927extern uint32_t vm_lopage_free_count;
3928SYSCTL_INT(_vm, OID_AUTO, page_wire_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_wire_count, 0, "");
3929SYSCTL_INT(_vm, OID_AUTO, lopage_free_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_lopage_free_count, 0, "");
3930 #endif /* DEVELOPMENT || DEBUG */
3931
e2d2fc5c
A
3932extern int vm_map_copy_overwrite_aligned_src_not_internal;
3933extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
3934extern int vm_map_copy_overwrite_aligned_src_large;
3935SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
3936SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
3937SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");
b0d623f7
A
3938
3939
0a7de745 3940extern uint32_t vm_page_external_count;
39236c6e
A
3941
3942SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");
d9a64523
A
3943
3944SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min, 0, "");
3945SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min, 0, "");
3946
3947#if DEVELOPMENT || DEBUG
3948SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min_divisor, 0, "");
3949SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min_divisor, 0, "");
3950#endif
39236c6e 3951
0a7de745
A
3952extern int vm_compressor_mode;
3953extern int vm_compressor_is_active;
3954extern int vm_compressor_available;
3955extern uint32_t vm_ripe_target_age;
3956extern uint32_t swapout_target_age;
39236c6e 3957extern int64_t compressor_bytes_used;
3e170ce0
A
3958extern int64_t c_segment_input_bytes;
3959extern int64_t c_segment_compressed_bytes;
0a7de745
A
3960extern uint32_t compressor_eval_period_in_msecs;
3961extern uint32_t compressor_sample_min_in_msecs;
3962extern uint32_t compressor_sample_max_in_msecs;
3963extern uint32_t compressor_thrashing_threshold_per_10msecs;
3964extern uint32_t compressor_thrashing_min_per_10msecs;
d9a64523
A
3965extern uint32_t vm_compressor_time_thread;
3966
3967#if DEVELOPMENT || DEBUG
0a7de745
A
3968extern uint32_t vm_compressor_minorcompact_threshold_divisor;
3969extern uint32_t vm_compressor_majorcompact_threshold_divisor;
3970extern uint32_t vm_compressor_unthrottle_threshold_divisor;
3971extern uint32_t vm_compressor_catchup_threshold_divisor;
d9a64523 3972
0a7de745
A
3973extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden;
3974extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden;
3975extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden;
3976extern uint32_t vm_compressor_catchup_threshold_divisor_overridden;
d9a64523 3977
5ba3f43e 3978extern vmct_stats_t vmct_stats;
d9a64523
A
3979
3980
3981STATIC int
3982sysctl_minorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3983{
3984 int new_value, changed;
3985 int error = sysctl_io_number(req, vm_compressor_minorcompact_threshold_divisor, sizeof(int), &new_value, &changed);
3986
3987 if (changed) {
0a7de745
A
3988 vm_compressor_minorcompact_threshold_divisor = new_value;
3989 vm_compressor_minorcompact_threshold_divisor_overridden = 1;
d9a64523 3990 }
0a7de745 3991 return error;
d9a64523
A
3992}
3993
3994SYSCTL_PROC(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor,
0a7de745
A
3995 CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
3996 0, 0, sysctl_minorcompact_threshold_divisor, "I", "");
d9a64523
A
3997
3998
3999STATIC int
4000sysctl_majorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4001{
4002 int new_value, changed;
4003 int error = sysctl_io_number(req, vm_compressor_majorcompact_threshold_divisor, sizeof(int), &new_value, &changed);
4004
4005 if (changed) {
0a7de745
A
4006 vm_compressor_majorcompact_threshold_divisor = new_value;
4007 vm_compressor_majorcompact_threshold_divisor_overridden = 1;
d9a64523 4008 }
0a7de745 4009 return error;
d9a64523
A
4010}
4011
4012SYSCTL_PROC(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor,
0a7de745
A
4013 CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
4014 0, 0, sysctl_majorcompact_threshold_divisor, "I", "");
d9a64523
A
4015
4016
4017STATIC int
4018sysctl_unthrottle_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4019{
4020 int new_value, changed;
4021 int error = sysctl_io_number(req, vm_compressor_unthrottle_threshold_divisor, sizeof(int), &new_value, &changed);
4022
4023 if (changed) {
0a7de745
A
4024 vm_compressor_unthrottle_threshold_divisor = new_value;
4025 vm_compressor_unthrottle_threshold_divisor_overridden = 1;
d9a64523 4026 }
0a7de745 4027 return error;
d9a64523
A
4028}
4029
4030SYSCTL_PROC(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor,
0a7de745
A
4031 CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
4032 0, 0, sysctl_unthrottle_threshold_divisor, "I", "");
d9a64523
A
4033
4034
4035STATIC int
4036sysctl_catchup_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4037{
4038 int new_value, changed;
4039 int error = sysctl_io_number(req, vm_compressor_catchup_threshold_divisor, sizeof(int), &new_value, &changed);
4040
4041 if (changed) {
0a7de745
A
4042 vm_compressor_catchup_threshold_divisor = new_value;
4043 vm_compressor_catchup_threshold_divisor_overridden = 1;
d9a64523 4044 }
0a7de745 4045 return error;
d9a64523
A
4046}
4047
4048SYSCTL_PROC(_vm, OID_AUTO, compressor_catchup_threshold_divisor,
0a7de745
A
4049 CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
4050 0, 0, sysctl_catchup_threshold_divisor, "I", "");
5ba3f43e 4051#endif
39236c6e 4052
d9a64523 4053
3e170ce0
A
4054SYSCTL_QUAD(_vm, OID_AUTO, compressor_input_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_input_bytes, "");
4055SYSCTL_QUAD(_vm, OID_AUTO, compressor_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_compressed_bytes, "");
4056SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");
4057
39236c6e 4058SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
04b8595b 4059SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
39236c6e 4060SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
3e170ce0
A
4061SYSCTL_INT(_vm, OID_AUTO, compressor_available, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_available, 0, "");
4062
bca245ac
A
4063extern int min_csegs_per_major_compaction;
4064SYSCTL_INT(_vm, OID_AUTO, compressor_min_csegs_per_major_compaction, CTLFLAG_RW | CTLFLAG_LOCKED, &min_csegs_per_major_compaction, 0, "");
4065
3e170ce0 4066SYSCTL_INT(_vm, OID_AUTO, vm_ripe_target_age_in_secs, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ripe_target_age, 0, "");
39236c6e
A
4067
4068SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
4069SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
4070SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
4071SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
4072SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");
39236c6e 4073
fe8ab488
A
4074SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");
4075
39037602 4076SYSCTL_INT(_vm, OID_AUTO, compressor_timing_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_time_thread, 0, "");
5ba3f43e
A
4077
4078#if DEVELOPMENT || DEBUG
4079SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[0], "");
4080SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[1], "");
4081
4082SYSCTL_QUAD(_vm, OID_AUTO, compressor_threads_total, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_cthreads_total, "");
4083
4084SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[0], "");
4085SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[1], "");
4086
4087SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[0], "");
4088SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[1], "");
4089
4090SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[0], 0, "");
4091SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[1], 0, "");
4092
4093SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[0], 0, "");
4094SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[1], 0, "");
4095
f427ee49
A
4096int vm_compressor_injected_error_count;
4097
4098SYSCTL_INT(_vm, OID_AUTO, compressor_injected_error_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_injected_error_count, 0, "");
4099
4100static int
4101sysctl_compressor_inject_error(__unused struct sysctl_oid *oidp,
4102 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4103{
4104 int result;
4105 vm_address_t va = 0;
4106 int changed;
4107
4108 result = sysctl_io_number(req, va, sizeof(va), &va, &changed);
4109 if (result == 0 && changed) {
4110 result = vm_map_inject_error(current_map(), va);
4111 if (result == 0) {
4112 /*
4113 * Count the number of errors injected successfully to detect
4114 * situations where corruption was caused by improper use of this
4115 * sysctl.
4116 */
4117 os_atomic_inc(&vm_compressor_injected_error_count, relaxed);
4118 }
4119 }
4120 return result;
4121}
4122
4123SYSCTL_PROC(_vm, OID_AUTO, compressor_inject_error, CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_RW,
4124 0, 0, sysctl_compressor_inject_error, "Q", "flips a bit in a compressed page for the current task");
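/*
 * Usage sketch (illustrative only, not part of the kernel build): a
 * DEVELOPMENT/DEBUG test writes a user virtual address into the sysctl above;
 * the handler flips a bit in the compressed copy of the page containing that
 * address in the caller's map, so a later decompression should trip the
 * compressor's corruption detection.  The target page presumably has to have
 * been sent to the compressor already for the injection to take effect.  The
 * helper name is hypothetical; the sketch assumes a 64-bit caller and
 * sysctlbyname(3).
 */
#if 0 /* illustrative userspace sketch */
#include <sys/sysctl.h>
#include <stdint.h>

/* Ask the kernel to corrupt the compressed copy of the page holding 'addr'. */
static int
inject_compressor_error(void *addr)
{
	uint64_t va = (uint64_t)(uintptr_t)addr;

	return sysctlbyname("vm.compressor_inject_error", NULL, NULL,
	           &va, sizeof(va));
}
#endif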
4125
5ba3f43e 4126#endif
39037602
A
4127
4128SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressions, "");
4129SYSCTL_QUAD(_vm, OID_AUTO, lz4_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compression_failures, "");
4130SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressed_bytes, "");
4131SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_delta, "");
4132SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_negative_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_negative_delta, "");
4133
4134SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressions, "");
4135SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressed_bytes, "");
4136
4137SYSCTL_QUAD(_vm, OID_AUTO, uc_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.uc_decompressions, "");
4138
4139SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions, "");
5ba3f43e
A
4140
4141SYSCTL_QUAD(_vm, OID_AUTO, wk_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_cabstime, "");
4142
4143SYSCTL_QUAD(_vm, OID_AUTO, wkh_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_cabstime, "");
4144SYSCTL_QUAD(_vm, OID_AUTO, wkh_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_compressions, "");
4145
4146SYSCTL_QUAD(_vm, OID_AUTO, wks_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_cabstime, "");
4147SYSCTL_QUAD(_vm, OID_AUTO, wks_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressions, "");
4148
39037602
A
4149SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions_exclusive, "");
4150SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_compressions, "");
4151SYSCTL_QUAD(_vm, OID_AUTO, wk_mzv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_mzv_compressions, "");
4152SYSCTL_QUAD(_vm, OID_AUTO, wk_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compression_failures, "");
4153SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_exclusive, "");
4154SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_total, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_total, "");
4155
5ba3f43e
A
4156SYSCTL_QUAD(_vm, OID_AUTO, wks_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressed_bytes, "");
4157SYSCTL_QUAD(_vm, OID_AUTO, wks_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compression_failures, "");
4158SYSCTL_QUAD(_vm, OID_AUTO, wks_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_sv_compressions, "");
4159
4160
39037602 4161SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressions, "");
5ba3f43e
A
4162
4163SYSCTL_QUAD(_vm, OID_AUTO, wk_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_dabstime, "");
4164
4165SYSCTL_QUAD(_vm, OID_AUTO, wkh_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_dabstime, "");
4166SYSCTL_QUAD(_vm, OID_AUTO, wkh_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_decompressions, "");
4167
4168SYSCTL_QUAD(_vm, OID_AUTO, wks_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_dabstime, "");
4169SYSCTL_QUAD(_vm, OID_AUTO, wks_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_decompressions, "");
4170
39037602
A
4171SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressed_bytes, "");
4172SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_decompressions, "");
4173
4174SYSCTL_INT(_vm, OID_AUTO, lz4_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_threshold, 0, "");
4175SYSCTL_INT(_vm, OID_AUTO, wkdm_reeval_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.wkdm_reeval_threshold, 0, "");
4176SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_skips, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_skips, 0, "");
4177SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_run_length, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_run_length, 0, "");
4178SYSCTL_INT(_vm, OID_AUTO, lz4_max_preselects, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_preselects, 0, "");
4179SYSCTL_INT(_vm, OID_AUTO, lz4_run_preselection_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_preselection_threshold, 0, "");
4180SYSCTL_INT(_vm, OID_AUTO, lz4_run_continue_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_continue_bytes, 0, "");
4181SYSCTL_INT(_vm, OID_AUTO, lz4_profitable_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_profitable_bytes, 0, "");
5ba3f43e
A
4182#if DEVELOPMENT || DEBUG
4183extern int vm_compressor_current_codec;
4184extern int vm_compressor_test_seg_wp;
4185extern boolean_t vm_compressor_force_sw_wkdm;
4186SYSCTL_INT(_vm, OID_AUTO, compressor_codec, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_current_codec, 0, "");
4187SYSCTL_INT(_vm, OID_AUTO, compressor_test_wp, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_test_seg_wp, 0, "");
4188
4189SYSCTL_INT(_vm, OID_AUTO, wksw_force, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_force_sw_wkdm, 0, "");
4190extern int precompy, wkswhw;
4191
4192SYSCTL_INT(_vm, OID_AUTO, precompy, CTLFLAG_RW | CTLFLAG_LOCKED, &precompy, 0, "");
4193SYSCTL_INT(_vm, OID_AUTO, wkswhw, CTLFLAG_RW | CTLFLAG_LOCKED, &wkswhw, 0, "");
4194extern unsigned int vm_ktrace_enabled;
4195SYSCTL_INT(_vm, OID_AUTO, vm_ktrace, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ktrace_enabled, 0, "");
4196#endif
39037602 4197
fe8ab488
A
4198#if CONFIG_PHANTOM_CACHE
4199extern uint32_t phantom_cache_thrashing_threshold;
4200extern uint32_t phantom_cache_eval_period_in_msecs;
4201extern uint32_t phantom_cache_thrashing_threshold_ssd;
4202
4203
4204SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
4205SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
4206SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
4207#endif
4208
39037602
A
4209#if CONFIG_BACKGROUND_QUEUE
4210
0a7de745
A
4211extern uint32_t vm_page_background_count;
4212extern uint32_t vm_page_background_target;
4213extern uint32_t vm_page_background_internal_count;
4214extern uint32_t vm_page_background_external_count;
4215extern uint32_t vm_page_background_mode;
4216extern uint32_t vm_page_background_exclude_external;
4217extern uint64_t vm_page_background_promoted_count;
39037602
A
4218extern uint64_t vm_pageout_rejected_bq_internal;
4219extern uint64_t vm_pageout_rejected_bq_external;
4220
4221SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
4222SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
39037602
A
4223SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
4224SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
4225SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
4226SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");
4227
4228SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
d9a64523
A
4229SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_considered_bq_internal, "");
4230SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_considered_bq_external, "");
39037602
A
4231SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
4232SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");
4233
d9a64523
A
4234#endif /* CONFIG_BACKGROUND_QUEUE */
4235
4236extern void vm_update_darkwake_mode(boolean_t);
4237extern boolean_t vm_darkwake_mode;
4238
4239STATIC int
4240sysctl_toggle_darkwake_mode(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4241{
4242 int new_value, changed;
4243 int error = sysctl_io_number(req, vm_darkwake_mode, sizeof(int), &new_value, &changed);
4244
0a7de745 4245 if (!error && changed) {
d9a64523
A
4246 if (new_value != 0 && new_value != 1) {
4247 printf("Error: Invalid value passed to darkwake sysctl. Acceptable: 0 or 1.\n");
4248 error = EINVAL;
4249 } else {
4250 vm_update_darkwake_mode((boolean_t) new_value);
4251 }
4252 }
4253
0a7de745 4254 return error;
d9a64523
A
4255}
4256
4257SYSCTL_PROC(_vm, OID_AUTO, darkwake_mode,
0a7de745
A
4258 CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
4259 0, 0, sysctl_toggle_darkwake_mode, "I", "");
39037602 4260
04b8595b
A
4261#if (DEVELOPMENT || DEBUG)
4262
4263SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
0a7de745
A
4264 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
4265 &vm_page_creation_throttled_hard, 0, "");
04b8595b
A
4266
4267SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
0a7de745
A
4268 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
4269 &vm_page_creation_throttled_soft, 0, "");
04b8595b 4270
39037602
A
4271extern uint32_t vm_pageout_memorystatus_fb_factor_nr;
4272extern uint32_t vm_pageout_memorystatus_fb_factor_dr;
4273SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_nr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_nr, 0, "");
4274SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_dr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_dr, 0, "");
4275
cb323159 4276extern uint32_t vm_grab_anon_nops;
39037602 4277
d9a64523
A
4278SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_debug.vm_grab_anon_overrides, 0, "");
4279SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_debug.vm_grab_anon_nops, 0, "");
39037602 4280
f427ee49
A
4281extern int vm_page_delayed_work_ctx_needed;
4282SYSCTL_INT(_vm, OID_AUTO, vm_page_needed_delayed_work_ctx, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_delayed_work_ctx_needed, 0, "");
4283
39037602 4284/* log message counters for persistence mode */
c3c9b80d
A
4285SCALABLE_COUNTER_DECLARE(oslog_p_total_msgcount);
4286SCALABLE_COUNTER_DECLARE(oslog_p_metadata_saved_msgcount);
4287SCALABLE_COUNTER_DECLARE(oslog_p_metadata_dropped_msgcount);
4288SCALABLE_COUNTER_DECLARE(oslog_p_error_count);
4289SCALABLE_COUNTER_DECLARE(oslog_p_saved_msgcount);
4290SCALABLE_COUNTER_DECLARE(oslog_p_dropped_msgcount);
4291SCALABLE_COUNTER_DECLARE(oslog_p_boot_dropped_msgcount);
4292SCALABLE_COUNTER_DECLARE(oslog_p_coprocessor_total_msgcount);
4293SCALABLE_COUNTER_DECLARE(oslog_p_coprocessor_dropped_msgcount);
4294SCALABLE_COUNTER_DECLARE(oslog_p_unresolved_kc_msgcount);
4295SCALABLE_COUNTER_DECLARE(oslog_p_fmt_invalid_msgcount);
4296SCALABLE_COUNTER_DECLARE(oslog_p_fmt_max_args_msgcount);
4297SCALABLE_COUNTER_DECLARE(oslog_p_truncated_msgcount);
39037602
A
4298
4299/* log message counters for streaming mode */
4300extern uint32_t oslog_s_total_msgcount;
4301extern uint32_t oslog_s_metadata_msgcount;
c3c9b80d 4302SCALABLE_COUNTER_DECLARE(oslog_s_error_count);
39037602
A
4303extern uint32_t oslog_s_streamed_msgcount;
4304extern uint32_t oslog_s_dropped_msgcount;
4305
f427ee49
A
4306/* log message counters for msgbuf logging */
4307extern uint32_t oslog_msgbuf_msgcount;
4308extern uint32_t oslog_msgbuf_dropped_msgcount;
4309extern uint32_t oslog_msgbuf_dropped_charcount;
4310
4311/* log message counters for vaddlog logging */
4312extern uint32_t vaddlog_msgcount;
4313extern uint32_t vaddlog_msgcount_dropped;
4314
c3c9b80d
A
4315SYSCTL_SCALABLE_COUNTER(_debug, oslog_p_total_msgcount, oslog_p_total_msgcount, "");
4316SYSCTL_SCALABLE_COUNTER(_debug, oslog_p_metadata_saved_msgcount, oslog_p_metadata_saved_msgcount, "");
4317SYSCTL_SCALABLE_COUNTER(_debug, oslog_p_metadata_dropped_msgcount, oslog_p_metadata_dropped_msgcount, "");
4318SYSCTL_SCALABLE_COUNTER(_debug, oslog_p_error_count, oslog_p_error_count, "");
4319SYSCTL_SCALABLE_COUNTER(_debug, oslog_p_saved_msgcount, oslog_p_saved_msgcount, "");
4320SYSCTL_SCALABLE_COUNTER(_debug, oslog_p_dropped_msgcount, oslog_p_dropped_msgcount, "");
4321SYSCTL_SCALABLE_COUNTER(_debug, oslog_p_boot_dropped_msgcount, oslog_p_boot_dropped_msgcount, "");
4322SYSCTL_SCALABLE_COUNTER(_debug, oslog_p_coprocessor_total_msgcount, oslog_p_coprocessor_total_msgcount, "");
4323SYSCTL_SCALABLE_COUNTER(_debug, oslog_p_coprocessor_dropped_msgcount, oslog_p_coprocessor_dropped_msgcount, "");
4324SYSCTL_SCALABLE_COUNTER(_debug, oslog_p_unresolved_kc_msgcount, oslog_p_unresolved_kc_msgcount, "");
4325
4326SYSCTL_SCALABLE_COUNTER(_debug, oslog_p_fmt_invalid_msgcount, oslog_p_fmt_invalid_msgcount, "");
4327SYSCTL_SCALABLE_COUNTER(_debug, oslog_p_fmt_max_args_msgcount, oslog_p_fmt_max_args_msgcount, "");
4328SYSCTL_SCALABLE_COUNTER(_debug, oslog_p_truncated_msgcount, oslog_p_truncated_msgcount, "");
39037602
A
4329
4330SYSCTL_UINT(_debug, OID_AUTO, oslog_s_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_total_msgcount, 0, "");
4331SYSCTL_UINT(_debug, OID_AUTO, oslog_s_metadata_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_metadata_msgcount, 0, "");
c3c9b80d 4332SYSCTL_SCALABLE_COUNTER(_debug, oslog_s_error_count, oslog_s_error_count, "");
39037602
A
4333SYSCTL_UINT(_debug, OID_AUTO, oslog_s_streamed_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_streamed_msgcount, 0, "");
4334SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_dropped_msgcount, 0, "");
4335
f427ee49
A
4336SYSCTL_UINT(_debug, OID_AUTO, oslog_msgbuf_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_msgbuf_msgcount, 0, "");
4337SYSCTL_UINT(_debug, OID_AUTO, oslog_msgbuf_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_msgbuf_dropped_msgcount, 0, "");
4338SYSCTL_UINT(_debug, OID_AUTO, oslog_msgbuf_dropped_charcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_msgbuf_dropped_charcount, 0, "");
4339
4340SYSCTL_UINT(_debug, OID_AUTO, vaddlog_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &vaddlog_msgcount, 0, "");
4341SYSCTL_UINT(_debug, OID_AUTO, vaddlog_msgcount_dropped, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &vaddlog_msgcount_dropped, 0, "");
39037602 4342
04b8595b
A
4343#endif /* DEVELOPMENT || DEBUG */
4344
b0d623f7 4345/*
fe8ab488 4346 * Enable tracing of voucher contents
b0d623f7 4347 */
fe8ab488 4348extern uint32_t ipc_voucher_trace_contents;
b0d623f7 4349
0a7de745
A
4350SYSCTL_INT(_kern, OID_AUTO, ipc_voucher_trace_contents,
4351 CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");
b0d623f7
A
4352
4353/*
4354 * Kernel stack size and depth
4355 */
0a7de745
A
4356SYSCTL_INT(_kern, OID_AUTO, stack_size,
4357 CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
4358SYSCTL_INT(_kern, OID_AUTO, stack_depth_max,
4359 CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
b0d623f7 4360
5ba3f43e 4361extern unsigned int kern_feature_overrides;
0a7de745
A
4362SYSCTL_INT(_kern, OID_AUTO, kern_feature_overrides,
4363 CTLFLAG_RD | CTLFLAG_LOCKED, &kern_feature_overrides, 0, "Kernel feature override mask");
5ba3f43e 4364
b7266188
A
4365/*
4366 * Enable backtrace collection for port allocations.
4367 */
4368extern int ipc_portbt;
4369
0a7de745
A
4370SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
4371 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
4372 &ipc_portbt, 0, "");
b7266188 4373
f427ee49
A
4374/*
4375 * Mach message signature validation control and outputs
4376 */
4377extern unsigned int ikm_signature_failures;
4378SYSCTL_INT(_kern, OID_AUTO, ikm_signature_failures,
4379 CTLFLAG_RD | CTLFLAG_LOCKED, &ikm_signature_failures, 0, "Message signature failure count");
4380extern unsigned int ikm_signature_failure_id;
4381SYSCTL_INT(_kern, OID_AUTO, ikm_signature_failure_id,
4382    CTLFLAG_RD | CTLFLAG_LOCKED, &ikm_signature_failure_id, 0, "Message signature failure ID");
4383
4384#if (DEVELOPMENT || DEBUG)
4385extern unsigned int ikm_signature_panic_disable;
4386SYSCTL_INT(_kern, OID_AUTO, ikm_signature_panic_disable,
4387 CTLFLAG_RW | CTLFLAG_LOCKED, &ikm_signature_panic_disable, 0, "Message signature failure mode");
4388extern unsigned int ikm_signature_header_failures;
4389SYSCTL_INT(_kern, OID_AUTO, ikm_signature_header_failures,
4390 CTLFLAG_RD | CTLFLAG_LOCKED, &ikm_signature_header_failures, 0, "Message header signature failure count");
4391extern unsigned int ikm_signature_trailer_failures;
4392SYSCTL_INT(_kern, OID_AUTO, ikm_signature_trailer_failures,
4393 CTLFLAG_RD | CTLFLAG_LOCKED, &ikm_signature_trailer_failures, 0, "Message trailer signature failure count");
4394#endif
4395
6d2010ae
A
4396/*
4397 * Scheduler sysctls
4398 */
4399
6d2010ae 4400SYSCTL_STRING(_kern, OID_AUTO, sched,
0a7de745
A
4401 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
4402 sched_string, sizeof(sched_string),
4403 "Timeshare scheduler implementation");
316670eb 4404
d9a64523
A
4405#if CONFIG_QUIESCE_COUNTER
4406static int
4407sysctl_cpu_quiescent_counter_interval SYSCTL_HANDLER_ARGS
4408{
4409#pragma unused(arg1, arg2)
4410
cb323159
A
4411 uint32_t local_min_interval_us = cpu_quiescent_counter_get_min_interval_us();
4412
4413 int error = sysctl_handle_int(oidp, &local_min_interval_us, 0, req);
0a7de745 4414 if (error || !req->newptr) {
d9a64523 4415 return error;
0a7de745 4416 }
d9a64523 4417
cb323159 4418 cpu_quiescent_counter_set_min_interval_us(local_min_interval_us);
d9a64523
A
4419
4420 return 0;
4421}
4422
4423SYSCTL_PROC(_kern, OID_AUTO, cpu_checkin_interval,
0a7de745
A
4424 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
4425 0, 0,
4426 sysctl_cpu_quiescent_counter_interval, "I",
4427 "Quiescent CPU checkin interval (microseconds)");
d9a64523
A
4428#endif /* CONFIG_QUIESCE_COUNTER */
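/*
 * Illustrative userspace sketch (assumption, not compiled here): reading the
 * quiescent-counter checkin interval via sysctlbyname(3). The handler above
 * exposes the value in microseconds.
 *
 *     #include <sys/sysctl.h>
 *     #include <stdint.h>
 *     #include <stdio.h>
 *
 *     uint32_t interval_us = 0;
 *     size_t len = sizeof(interval_us);
 *     if (sysctlbyname("kern.cpu_checkin_interval", &interval_us, &len, NULL, 0) == 0)
 *             printf("cpu_checkin_interval = %u us\n", interval_us);
 */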
4429
4430
316670eb 4431/*
f427ee49 4432 * Only support runtime modification on development / debug
316670eb 4433 */
f427ee49 4434#if DEVELOPMENT || DEBUG
5ba3f43e 4435extern int precise_user_kernel_time;
0a7de745
A
4436SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time,
4437 CTLFLAG_RW | CTLFLAG_LOCKED,
4438 &precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time");
f427ee49 4439#endif /* DEVELOPMENT || DEBUG */
fe8ab488
A
4440
4441/* Parameters related to timer coalescing tuning, to be replaced
4442 * with a dedicated system call in the future.
4443 */
4444/* Enable processing of pending timers in the context of any other interrupt.
4445 * Coalescing tuning parameters for the various thread/task attributes. */
4446STATIC int
4447sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
4448{
4449#pragma unused(oidp)
0a7de745 4450	int size = arg2; /* size in bytes of the value at arg1, passed as arg2 */
fe8ab488
A
4451 int error;
4452 int changed = 0;
4453 uint64_t old_value_ns;
4454 uint64_t new_value_ns;
4455 uint64_t value_abstime;
0a7de745 4456 if (size == sizeof(uint32_t)) {
fe8ab488 4457 value_abstime = *((uint32_t *)arg1);
0a7de745 4458 } else if (size == sizeof(uint64_t)) {
fe8ab488 4459 value_abstime = *((uint64_t *)arg1);
0a7de745
A
4460 } else {
4461 return ENOTSUP;
4462 }
fe8ab488
A
4463
4464 absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
4465 error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
0a7de745 4466 if ((error) || (!changed)) {
fe8ab488 4467 return error;
0a7de745 4468 }
fe8ab488
A
4469
4470 nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
0a7de745 4471 if (size == sizeof(uint32_t)) {
fe8ab488 4472 *((uint32_t *)arg1) = (uint32_t)value_abstime;
0a7de745 4473 } else {
fe8ab488 4474 *((uint64_t *)arg1) = value_abstime;
0a7de745 4475 }
fe8ab488
A
4476 return error;
4477}
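/*
 * The handler above lets userspace read and write these parameters in
 * nanoseconds while the kernel stores them in absolute time units. A minimal
 * userspace sketch (assumption, not compiled here):
 *
 *     uint64_t ns_max = 0;
 *     size_t len = sizeof(ns_max);
 *     if (sysctlbyname("kern.timer_coalesce_bg_ns_max", &ns_max, &len, NULL, 0) == 0)
 *             printf("background timer coalescing max = %llu ns\n", ns_max);
 */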
4478
4479SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
4480 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4481 &tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
4482SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
4483 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4484 &tcoal_prio_params.timer_resort_threshold_abstime,
4485 sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
4486 sysctl_timer_user_us_kernel_abstime,
4487 "Q", "");
4488SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
4489 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4490 &tcoal_prio_params.timer_coalesce_bg_abstime_max,
4491 sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
4492 sysctl_timer_user_us_kernel_abstime,
4493 "Q", "");
4494
4495SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
4496 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4497 &tcoal_prio_params.timer_coalesce_kt_shift, 0, "");
4498
4499SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
4500 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4501 &tcoal_prio_params.timer_coalesce_kt_abstime_max,
4502 sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
4503 sysctl_timer_user_us_kernel_abstime,
4504 "Q", "");
4505
4506SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
4507 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4508 &tcoal_prio_params.timer_coalesce_fp_shift, 0, "");
4509
4510SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
4511 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4512 &tcoal_prio_params.timer_coalesce_fp_abstime_max,
0a7de745
A
4513 sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
4514 sysctl_timer_user_us_kernel_abstime,
fe8ab488
A
4515 "Q", "");
4516
4517SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
4518 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4519 &tcoal_prio_params.timer_coalesce_ts_shift, 0, "");
4520
4521SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
4522 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4523 &tcoal_prio_params.timer_coalesce_ts_abstime_max,
4524 sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
4525 sysctl_timer_user_us_kernel_abstime,
4526 "Q", "");
4527
4528SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
4529 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4530 &tcoal_prio_params.latency_qos_scale[0], 0, "");
4531
4532SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
4533 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4534 &tcoal_prio_params.latency_qos_abstime_max[0],
4535 sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
4536 sysctl_timer_user_us_kernel_abstime,
4537 "Q", "");
4538
4539SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
4540 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4541 &tcoal_prio_params.latency_qos_scale[1], 0, "");
4542
4543SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
4544 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4545 &tcoal_prio_params.latency_qos_abstime_max[1],
4546 sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
4547 sysctl_timer_user_us_kernel_abstime,
4548 "Q", "");
4549
4550SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
4551 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4552 &tcoal_prio_params.latency_qos_scale[2], 0, "");
4553
4554SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
4555 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4556 &tcoal_prio_params.latency_qos_abstime_max[2],
4557 sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
4558 sysctl_timer_user_us_kernel_abstime,
4559 "Q", "");
4560
4561SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
4562 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4563 &tcoal_prio_params.latency_qos_scale[3], 0, "");
4564
4565SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
4566 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4567 &tcoal_prio_params.latency_qos_abstime_max[3],
4568 sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
4569 sysctl_timer_user_us_kernel_abstime,
4570 "Q", "");
4571
4572SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
4573 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4574 &tcoal_prio_params.latency_qos_scale[4], 0, "");
4575
4576SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
4577 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4578 &tcoal_prio_params.latency_qos_abstime_max[4],
4579 sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
4580 sysctl_timer_user_us_kernel_abstime,
4581 "Q", "");
4582
4583SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
4584 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4585 &tcoal_prio_params.latency_qos_scale[5], 0, "");
4586
4587SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
4588 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4589 &tcoal_prio_params.latency_qos_abstime_max[5],
4590 sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
4591 sysctl_timer_user_us_kernel_abstime,
4592 "Q", "");
4593
4594/* Communicate the "user idle level" heuristic to the timer layer, and
4595 * potentially other layers in the future.
4596 */
4597
4598static int
0a7de745
A
4599timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4600{
fe8ab488
A
4601 int new_value = 0, old_value = 0, changed = 0, error;
4602
4603 old_value = timer_get_user_idle_level();
4604
4605 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
4606
4607 if (error == 0 && changed) {
0a7de745 4608 if (timer_set_user_idle_level(new_value) != KERN_SUCCESS) {
fe8ab488 4609 error = ERANGE;
0a7de745 4610 }
fe8ab488
A
4611 }
4612
4613 return error;
4614}
4615
4616SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level,
4617 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
4618 0, 0,
4619 timer_user_idle_level, "I", "User idle level heuristic, 0-128");
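/*
 * Illustrative usage (assumption, not part of this file): the idle level can
 * be read or set from the command line, e.g.
 *
 *     sysctl machdep.user_idle_level        # read, 0-128
 *     sysctl machdep.user_idle_level=128    # write; out-of-range values fail with ERANGE
 */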
4620
4621#if HYPERVISOR
0a7de745
A
4622SYSCTL_INT(_kern, OID_AUTO, hv_support,
4623 CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
4624 &hv_support_available, 0, "");
f427ee49
A
4625
4626SYSCTL_INT(_kern, OID_AUTO, hv_disable,
4627 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4628 &hv_disable, 0, "");
4629#endif
4630
4631#if DEVELOPMENT || DEBUG
4632extern uint64_t driverkit_checkin_timed_out;
4633SYSCTL_QUAD(_kern, OID_AUTO, driverkit_checkin_timed_out,
4634 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
4635 &driverkit_checkin_timed_out, "timestamp of dext checkin timeout");
fe8ab488 4636#endif
3e170ce0 4637
c3c9b80d
A
4638extern int IOGetVMMPresent(void);
4639
f427ee49
A
4640static int
4641hv_vmm_present SYSCTL_HANDLER_ARGS
4642{
4643 __unused struct sysctl_oid *unused_oidp = oidp;
4644 __unused void *unused_arg1 = arg1;
4645 __unused int unused_arg2 = arg2;
4646
4647 int hv_vmm_present = 0;
4648
c3c9b80d 4649 hv_vmm_present = IOGetVMMPresent();
f427ee49
A
4650
4651 return SYSCTL_OUT(req, &hv_vmm_present, sizeof(hv_vmm_present));
4652}
4653
4654SYSCTL_PROC(_kern, OID_AUTO, hv_vmm_present,
4655 CTLTYPE_INT | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
4656 0, 0,
4657 hv_vmm_present, "I", "");
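/*
 * Illustrative userspace sketch (assumption, not compiled here): any process
 * may query whether the system is running under a virtual machine monitor.
 *
 *     int vmm_present = 0;
 *     size_t len = sizeof(vmm_present);
 *     if (sysctlbyname("kern.hv_vmm_present", &vmm_present, &len, NULL, 0) == 0)
 *             printf("running under a VMM: %s\n", vmm_present ? "yes" : "no");
 */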
4658
4659#if CONFIG_DARKBOOT
5ba3f43e
A
4660STATIC int
4661sysctl_darkboot SYSCTL_HANDLER_ARGS
4662{
4663 int err = 0, value = 0;
4664#pragma unused(oidp, arg1, arg2, err, value, req)
4665
4666 /*
4667 * Handle the sysctl request.
4668 *
4669	 * If this is a read, sysctl_io_number() copies the current darkboot value out. Otherwise,
4670	 * the request identifier is read into "value" so it can be honored below.
4671 */
4672 if ((err = sysctl_io_number(req, darkboot, sizeof(int), &value, NULL)) != 0) {
4673 goto exit;
4674 }
4675
4676 /* writing requested, let's process the request */
4677 if (req->newptr) {
4678 /* writing is protected by an entitlement */
4679 if (priv_check_cred(kauth_cred_get(), PRIV_DARKBOOT, 0) != 0) {
4680 err = EPERM;
4681 goto exit;
4682 }
4683
4684 switch (value) {
4685 case MEMORY_MAINTENANCE_DARK_BOOT_UNSET:
4686 /*
4687 * If the darkboot sysctl is unset, the NVRAM variable
4688 * must be unset too. If that's not the case, it means
4689			 * someone is doing something unexpected and unsupported.
4690 */
4691 if (darkboot != 0) {
4692 int ret = PERemoveNVRAMProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME);
4693 if (ret) {
4694 darkboot = 0;
4695 } else {
4696 err = EINVAL;
4697 }
4698 }
4699 break;
4700 case MEMORY_MAINTENANCE_DARK_BOOT_SET:
4701 darkboot = 1;
4702 break;
4703 case MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT: {
4704 /*
4705 * Set the NVRAM and update 'darkboot' in case
4706 * of success. Otherwise, do not update
4707 * 'darkboot' and report the failure.
4708 */
4709 if (PEWriteNVRAMBooleanProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME, TRUE)) {
4710 darkboot = 1;
4711 } else {
4712 err = EINVAL;
4713 }
4714
4715 break;
4716 }
4717 default:
4718 err = EINVAL;
4719 }
4720 }
4721
4722exit:
4723 return err;
4724}
4725
4726SYSCTL_PROC(_kern, OID_AUTO, darkboot,
0a7de745
A
4727 CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
4728 0, 0, sysctl_darkboot, "I", "");
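/*
 * Illustrative usage (assumption, not part of this file): reading the current
 * darkboot state requires no entitlement; writing one of the
 * MEMORY_MAINTENANCE_DARK_BOOT_* request identifiers requires PRIV_DARKBOOT.
 *
 *     int darkboot_state = 0;
 *     size_t len = sizeof(darkboot_state);
 *     sysctlbyname("kern.darkboot", &darkboot_state, &len, NULL, 0);
 */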
f427ee49 4729#endif /* CONFIG_DARKBOOT */
3e170ce0 4730
39037602
A
4731#if DEVELOPMENT || DEBUG
4732#include <sys/sysent.h>
4733/* This should result in a fatal exception, verifying that "sysent" is
4734 * write-protected.
4735 */
4736static int
0a7de745
A
4737kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4738{
39037602
A
4739 uint64_t new_value = 0, old_value = 0;
4740 int changed = 0, error;
4741
4742 error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
4743 if ((error == 0) && changed) {
f427ee49 4744 volatile uint32_t *wraddr = __DECONST(uint32_t *, &sysent[0]);
39037602
A
4745 *wraddr = 0;
4746 printf("sysent[0] write succeeded\n");
4747 }
4748 return error;
4749}
4750
4751SYSCTL_PROC(_kern, OID_AUTO, sysent_const_check,
4752 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
4753 0, 0,
4754 kern_sysent_write, "I", "Attempt sysent[0] write");
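/*
 * Illustrative usage (assumption, not part of this file): writing any value
 * should fault the kernel on DEVELOPMENT/DEBUG builds, demonstrating that
 * sysent is write-protected, e.g.
 *
 *     sysctl -w kern.sysent_const_check=1
 */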
4755
4756#endif
4757
4758#if DEVELOPMENT || DEBUG
c3c9b80d 4759SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_KERN, NULL, 1, "");
39037602
A
4760#else
4761SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 0, "");
4762#endif
5ba3f43e
A
4763
4764
4765#if DEVELOPMENT || DEBUG
4766
4767static int
4768sysctl_panic_test SYSCTL_HANDLER_ARGS
4769{
4770#pragma unused(arg1, arg2)
0a7de745
A
4771 int rval = 0;
4772 char str[32] = "entry prelog postlog postcore";
4773
4774 rval = sysctl_handle_string(oidp, str, sizeof(str), req);
4775
4776 if (rval == 0 && req->newptr) {
4777 if (strncmp("entry", str, strlen("entry")) == 0) {
4778 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_ENTRY, "test recursive panic at entry");
4779 } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
4780 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_PRELOG, "test recursive panic prior to writing a paniclog");
4781 } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
4782 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTLOG, "test recursive panic subsequent to paniclog");
4783 } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
4784 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTCORE, "test recursive panic subsequent to on-device core");
4785 }
4786 }
4787
4788 return rval;
5ba3f43e
A
4789}
4790
4791static int
4792sysctl_debugger_test SYSCTL_HANDLER_ARGS
4793{
4794#pragma unused(arg1, arg2)
0a7de745
A
4795 int rval = 0;
4796 char str[32] = "entry prelog postlog postcore";
4797
4798 rval = sysctl_handle_string(oidp, str, sizeof(str), req);
4799
4800 if (rval == 0 && req->newptr) {
4801 if (strncmp("entry", str, strlen("entry")) == 0) {
4802 DebuggerWithContext(0, NULL, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY);
4803 } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
4804 DebuggerWithContext(0, NULL, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG);
4805 } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
4806 DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG);
4807 } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
4808 DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE);
4809 }
4810 }
4811
4812 return rval;
5ba3f43e
A
4813}
4814
cb323159 4815decl_lck_spin_data(, spinlock_panic_test_lock);
5ba3f43e
A
4816
4817__attribute__((noreturn))
4818static void
4819spinlock_panic_test_acquire_spinlock(void * arg __unused, wait_result_t wres __unused)
4820{
4821 lck_spin_lock(&spinlock_panic_test_lock);
0a7de745
A
4822 while (1) {
4823 ;
4824 }
5ba3f43e
A
4825}
4826
4827static int
4828sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS
4829{
4830#pragma unused(oidp, arg1, arg2)
0a7de745 4831 if (req->newlen == 0) {
5ba3f43e 4832 return EINVAL;
0a7de745 4833 }
5ba3f43e
A
4834
4835 thread_t panic_spinlock_thread;
4836 /* Initialize panic spinlock */
4837 lck_grp_t * panic_spinlock_grp;
4838 lck_grp_attr_t * panic_spinlock_grp_attr;
4839 lck_attr_t * panic_spinlock_attr;
4840
4841 panic_spinlock_grp_attr = lck_grp_attr_alloc_init();
0a7de745 4842 panic_spinlock_grp = lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr);
5ba3f43e
A
4843 panic_spinlock_attr = lck_attr_alloc_init();
4844
4845 lck_spin_init(&spinlock_panic_test_lock, panic_spinlock_grp, panic_spinlock_attr);
4846
4847
4848 /* Create thread to acquire spinlock */
4849 if (kernel_thread_start(spinlock_panic_test_acquire_spinlock, NULL, &panic_spinlock_thread) != KERN_SUCCESS) {
4850 return EBUSY;
4851 }
4852
4853 /* Try to acquire spinlock -- should panic eventually */
4854 lck_spin_lock(&spinlock_panic_test_lock);
0a7de745
A
4855 while (1) {
4856 ;
4857 }
5ba3f43e
A
4858}
4859
4860__attribute__((noreturn))
4861static void
4862simultaneous_panic_worker
4863(void * arg, wait_result_t wres __unused)
4864{
4865 atomic_int *start_panic = (atomic_int *)arg;
4866
0a7de745
A
4867 while (!atomic_load(start_panic)) {
4868 ;
4869 }
5ba3f43e
A
4870 panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number());
4871 __builtin_unreachable();
4872}
4873
4874static int
4875sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS
4876{
4877#pragma unused(oidp, arg1, arg2)
0a7de745 4878 if (req->newlen == 0) {
5ba3f43e 4879 return EINVAL;
0a7de745 4880 }
5ba3f43e
A
4881
4882 int i = 0, threads_to_create = 2 * processor_count;
4883 atomic_int start_panic = 0;
4884 unsigned int threads_created = 0;
4885 thread_t new_panic_thread;
4886
4887 for (i = threads_to_create; i > 0; i--) {
4888 if (kernel_thread_start(simultaneous_panic_worker, (void *) &start_panic, &new_panic_thread) == KERN_SUCCESS) {
4889 threads_created++;
4890 }
4891 }
4892
4893 /* FAIL if we couldn't create at least processor_count threads */
4894 if (threads_created < processor_count) {
4895 panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %d (of %d)",
0a7de745 4896 threads_created, threads_to_create);
5ba3f43e
A
4897 }
4898
4899 atomic_exchange(&start_panic, 1);
0a7de745
A
4900 while (1) {
4901 ;
4902 }
5ba3f43e
A
4903}
4904
4905SYSCTL_PROC(_debug, OID_AUTO, panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_panic_test, "A", "panic test");
4906SYSCTL_PROC(_debug, OID_AUTO, debugger_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_debugger_test, "A", "debugger test");
4907SYSCTL_PROC(_debug, OID_AUTO, spinlock_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_spinlock_panic_test, "A", "spinlock panic test");
4908SYSCTL_PROC(_debug, OID_AUTO, simultaneous_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_simultaneous_panic_test, "A", "simultaneous panic test");
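/*
 * Illustrative usage (assumption, not part of this file): each of these test
 * controls takes one of the keywords accepted by its handler, e.g.
 *
 *     sysctl debug.panic_test=prelog        # recursive panic before the paniclog is written
 *     sysctl debug.debugger_test=postcore   # recursive panic after the on-device core
 */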
4909
d9a64523
A
4910extern int exc_resource_threads_enabled;
4911
4912SYSCTL_INT(_kern, OID_AUTO, exc_resource_threads_enabled, CTLFLAG_RD | CTLFLAG_LOCKED, &exc_resource_threads_enabled, 0, "exc_resource thread limit enabled");
4913
5ba3f43e
A
4914
4915#endif /* DEVELOPMENT || DEBUG */
4916
f427ee49
A
4917#if CONFIG_THREAD_GROUPS
4918#if DEVELOPMENT || DEBUG
4919
4920static int
4921sysctl_get_thread_group_id SYSCTL_HANDLER_ARGS
4922{
4923#pragma unused(arg1, arg2, oidp)
4924 uint64_t thread_group_id = thread_group_get_id(thread_group_get(current_thread()));
4925 return SYSCTL_OUT(req, &thread_group_id, sizeof(thread_group_id));
4926}
4927
4928SYSCTL_PROC(_kern, OID_AUTO, thread_group_id, CTLFLAG_RD | CTLFLAG_LOCKED | CTLTYPE_QUAD,
4929    0, 0, &sysctl_get_thread_group_id, "I", "thread group ID of the calling thread");
4930
4931STATIC int
4932sysctl_thread_group_count(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4933{
4934 int value = thread_group_count();
4935 return sysctl_io_number(req, value, sizeof(value), NULL, NULL);
4936}
4937
4938SYSCTL_PROC(_kern, OID_AUTO, thread_group_count, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_KERN,
4939 0, 0, &sysctl_thread_group_count, "I", "count of thread groups");
4940
4941#endif /* DEVELOPMENT || DEBUG */
4942const uint32_t thread_groups_supported = 1;
4943#else /* CONFIG_THREAD_GROUPS */
5ba3f43e 4944const uint32_t thread_groups_supported = 0;
f427ee49 4945#endif /* CONFIG_THREAD_GROUPS */
5ba3f43e
A
4946
4947STATIC int
0a7de745 4948sysctl_thread_groups_supported(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
5ba3f43e
A
4949{
4950 int value = thread_groups_supported;
0a7de745 4951 return sysctl_io_number(req, value, sizeof(value), NULL, NULL);
5ba3f43e
A
4952}
4953
4954SYSCTL_PROC(_kern, OID_AUTO, thread_groups_supported, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_KERN,
4955 0, 0, &sysctl_thread_groups_supported, "I", "thread groups supported");
4956
4957static int
4958sysctl_grade_cputype SYSCTL_HANDLER_ARGS
4959{
4960#pragma unused(arg1, arg2, oidp)
4961 int error = 0;
4962 int type_tuple[2] = {};
4963 int return_value = 0;
4964
4965 error = SYSCTL_IN(req, &type_tuple, sizeof(type_tuple));
4966
4967 if (error) {
4968 return error;
4969 }
4970
f427ee49 4971 return_value = grade_binary(type_tuple[0], type_tuple[1] & ~CPU_SUBTYPE_MASK, type_tuple[1] & CPU_SUBTYPE_MASK, FALSE);
5ba3f43e
A
4972
4973 error = SYSCTL_OUT(req, &return_value, sizeof(return_value));
4974
4975 if (error) {
4976 return error;
4977 }
4978
4979 return error;
4980}
4981
4982SYSCTL_PROC(_kern, OID_AUTO, grade_cputype,
0a7de745
A
4983 CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED | CTLTYPE_OPAQUE,
4984 0, 0, &sysctl_grade_cputype, "S",
4985 "grade value of cpu_type_t+cpu_sub_type_t");
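/*
 * Illustrative userspace sketch (assumption, not compiled here): the new
 * value is a {cpu_type_t, cpu_subtype_t} pair and the old value receives the
 * grade computed by grade_binary().
 *
 *     #include <mach/machine.h>
 *
 *     int tuple[2] = { CPU_TYPE_X86_64, CPU_SUBTYPE_X86_64_ALL };
 *     int grade = 0;
 *     size_t len = sizeof(grade);
 *     sysctlbyname("kern.grade_cputype", &grade, &len, tuple, sizeof(tuple));
 */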
d9a64523 4986
f427ee49
A
4987extern boolean_t allow_direct_handoff;
4988SYSCTL_INT(_kern, OID_AUTO, direct_handoff,
4989 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
4990 &allow_direct_handoff, 0, "Enable direct handoff for realtime threads");
d9a64523
A
4991
4992#if DEVELOPMENT || DEBUG
4993
f427ee49
A
4994SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_pa, CTLFLAG_RD | CTLFLAG_LOCKED,
4995 &phys_carveout_pa,
4996 "base physical address of the phys_carveout_mb boot-arg region");
4997SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_size, CTLFLAG_RD | CTLFLAG_LOCKED,
4998 &phys_carveout_size,
4999 "size in bytes of the phys_carveout_mb boot-arg region");
5000
ea3f0419
A
5001extern void do_cseg_wedge_thread(void);
5002extern void do_cseg_unwedge_thread(void);
5003
5004static int
5005cseg_wedge_thread SYSCTL_HANDLER_ARGS
5006{
5007#pragma unused(arg1, arg2)
5008
5009 int error, val = 0;
5010 error = sysctl_handle_int(oidp, &val, 0, req);
5011 if (error || val == 0) {
5012 return error;
5013 }
5014
5015 do_cseg_wedge_thread();
5016 return 0;
5017}
5018SYSCTL_PROC(_kern, OID_AUTO, cseg_wedge_thread, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, cseg_wedge_thread, "I", "wedge c_seg thread");
5019
5020static int
5021cseg_unwedge_thread SYSCTL_HANDLER_ARGS
5022{
5023#pragma unused(arg1, arg2)
5024
5025 int error, val = 0;
5026 error = sysctl_handle_int(oidp, &val, 0, req);
5027 if (error || val == 0) {
5028 return error;
5029 }
5030
5031 do_cseg_unwedge_thread();
5032 return 0;
5033}
5034SYSCTL_PROC(_kern, OID_AUTO, cseg_unwedge_thread, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, cseg_unwedge_thread, "I", "unwedge c_seg thread");
5035
d9a64523
A
5036static atomic_int wedge_thread_should_wake = 0;
5037
5038static int
5039unwedge_thread SYSCTL_HANDLER_ARGS
5040{
5041#pragma unused(arg1, arg2)
5042 int error, val = 0;
5043 error = sysctl_handle_int(oidp, &val, 0, req);
5044 if (error || val == 0) {
5045 return error;
5046 }
5047
5048 atomic_store(&wedge_thread_should_wake, 1);
5049 return 0;
5050}
5051
5052SYSCTL_PROC(_kern, OID_AUTO, unwedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, unwedge_thread, "I", "unwedge the thread wedged by kern.wedge_thread");
5053
d9a64523
A
5054static int
5055wedge_thread SYSCTL_HANDLER_ARGS
5056{
0a7de745
A
5057#pragma unused(arg1, arg2)
5058
5059 int error, val = 0;
d9a64523
A
5060 error = sysctl_handle_int(oidp, &val, 0, req);
5061 if (error || val == 0) {
0a7de745 5062 return error;
d9a64523 5063 }
0a7de745 5064
d9a64523
A
5065 uint64_t interval = 1;
5066 nanoseconds_to_absolutetime(1000 * 1000 * 50, &interval);
5067
5068 atomic_store(&wedge_thread_should_wake, 0);
5069 while (!atomic_load(&wedge_thread_should_wake)) {
0a7de745 5070 tsleep1(NULL, 0, "wedge_thread", mach_absolute_time() + interval, NULL);
d9a64523 5071 }
0a7de745 5072
d9a64523
A
5073 return 0;
5074}
5075
f427ee49
A
5076SYSCTL_PROC(_kern, OID_AUTO, wedge_thread,
5077 CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, wedge_thread, "I",
5078 "wedge this thread so it cannot be cleaned up");
4ba76501
A
5079
5080static int
5081sysctl_total_corpses_count SYSCTL_HANDLER_ARGS
5082{
5083#pragma unused(oidp, arg1, arg2)
f427ee49
A
5084 extern unsigned long total_corpses_count(void);
5085
5086 unsigned long corpse_count_long = total_corpses_count();
5087 unsigned int corpse_count = (unsigned int)MIN(corpse_count_long, UINT_MAX);
5088 return sysctl_io_opaque(req, &corpse_count, sizeof(corpse_count), NULL);
4ba76501
A
5089}
5090
f427ee49
A
5091SYSCTL_PROC(_kern, OID_AUTO, total_corpses_count,
5092 CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0,
5093 sysctl_total_corpses_count, "I", "total corpses on the system");
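/*
 * Illustrative userspace sketch (assumption, not compiled here): the count is
 * returned as an unsigned int, saturated at UINT_MAX by the handler above.
 *
 *     unsigned int corpses = 0;
 *     size_t len = sizeof(corpses);
 *     if (sysctlbyname("kern.total_corpses_count", &corpses, &len, NULL, 0) == 0)
 *             printf("corpses on the system: %u\n", corpses);
 */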
4ba76501 5094
d9a64523
A
5095static int
5096sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS;
5097static int
5098sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS;
5099int
5100tstile_test_prim_lock(boolean_t use_hashtable);
5101int
5102tstile_test_prim_unlock(boolean_t use_hashtable);
5103
d9a64523
A
5104static int
5105sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS
5106{
5107#pragma unused(arg1, arg2)
5108 int error, val = 0;
5109 error = sysctl_handle_int(oidp, &val, 0, req);
5110 if (error || val == 0) {
5111 return error;
5112 }
cb323159
A
5113 switch (val) {
5114 case SYSCTL_TURNSTILE_TEST_USER_DEFAULT:
5115 case SYSCTL_TURNSTILE_TEST_USER_HASHTABLE:
5116 case SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT:
5117 case SYSCTL_TURNSTILE_TEST_KERNEL_HASHTABLE:
5118 return tstile_test_prim_lock(val);
5119 default:
5120 return error;
5121 }
d9a64523
A
5122}
5123
5124static int
5125sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS
5126{
5127#pragma unused(arg1, arg2)
5128 int error, val = 0;
5129 error = sysctl_handle_int(oidp, &val, 0, req);
5130 if (error || val == 0) {
5131 return error;
5132 }
cb323159
A
5133 switch (val) {
5134 case SYSCTL_TURNSTILE_TEST_USER_DEFAULT:
5135 case SYSCTL_TURNSTILE_TEST_USER_HASHTABLE:
5136 case SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT:
5137 case SYSCTL_TURNSTILE_TEST_KERNEL_HASHTABLE:
5138 return tstile_test_prim_unlock(val);
5139 default:
5140 return error;
5141 }
d9a64523
A
5142}
5143
5144SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_lock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
0a7de745 5145 0, 0, sysctl_turnstile_test_prim_lock, "I", "turnstiles test lock");
d9a64523
A
5146
5147SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_unlock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
0a7de745 5148 0, 0, sysctl_turnstile_test_prim_unlock, "I", "turnstiles test unlock");
d9a64523
A
5149
5150int
5151turnstile_get_boost_stats_sysctl(void *req);
5152int
5153turnstile_get_unboost_stats_sysctl(void *req);
5154static int
5155sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS;
5156static int
5157sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS;
5158extern uint64_t thread_block_on_turnstile_count;
5159extern uint64_t thread_block_on_regular_waitq_count;
5160
5161static int
5162sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS
5163{
5164#pragma unused(arg1, arg2, oidp)
5165 return turnstile_get_boost_stats_sysctl(req);
5166}
5167
5168static int
5169sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS
5170{
5171#pragma unused(arg1, arg2, oidp)
5172 return turnstile_get_unboost_stats_sysctl(req);
5173}
5174
5175SYSCTL_PROC(_kern, OID_AUTO, turnstile_boost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
0a7de745 5176 0, 0, sysctl_turnstile_boost_stats, "S", "turnstiles boost stats");
d9a64523 5177SYSCTL_PROC(_kern, OID_AUTO, turnstile_unboost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
0a7de745 5178 0, 0, sysctl_turnstile_unboost_stats, "S", "turnstiles unboost stats");
d9a64523 5179SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_turnstile,
0a7de745
A
5180 CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
5181 &thread_block_on_turnstile_count, "thread blocked on turnstile count");
d9a64523 5182SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_reg_waitq,
0a7de745
A
5183 CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
5184 &thread_block_on_regular_waitq_count, "thread blocked on regular waitq count");
d9a64523 5185
d9a64523
A
5186static int
5187sysctl_erase_all_test_mtx_stats SYSCTL_HANDLER_ARGS
5188{
5189#pragma unused(arg1, arg2)
5190 int error, val = 0;
5191 error = sysctl_handle_int(oidp, &val, 0, req);
5192 if (error || val == 0) {
5193 return error;
5194 }
5195
5196 if (val == 1) {
5197 lck_mtx_test_init();
5198 erase_all_test_mtx_stats();
5199 }
5200
5201 return 0;
5202}
5203
5204static int
5205sysctl_get_test_mtx_stats SYSCTL_HANDLER_ARGS
5206{
5207#pragma unused(oidp, arg1, arg2)
5208 char* buffer;
5209 int size, buffer_size, error;
5210
5211 buffer_size = 1000;
f427ee49 5212 buffer = kheap_alloc(KHEAP_TEMP, buffer_size, Z_WAITOK);
0a7de745 5213 if (!buffer) {
d9a64523 5214		panic("Failed to allocate memory for %s\n", __func__);
0a7de745 5215 }
d9a64523
A
5216
5217 lck_mtx_test_init();
5218
5219 size = get_test_mtx_stats_string(buffer, buffer_size);
5220
5221 error = sysctl_io_string(req, buffer, size, 0, NULL);
5222
f427ee49 5223 kheap_free(KHEAP_TEMP, buffer, buffer_size);
d9a64523
A
5224
5225 return error;
5226}
5227
5228static int
5229sysctl_test_mtx_uncontended SYSCTL_HANDLER_ARGS
5230{
5231#pragma unused(oidp, arg1, arg2)
5232 char* buffer;
5233 int buffer_size, offset, error, iter;
5234 char input_val[40];
5235
5236 if (!req->newptr) {
5237 return 0;
5238 }
5239
5240 if (!req->oldptr) {
5241 return EINVAL;
5242 }
5243
5244 if (req->newlen >= sizeof(input_val)) {
5245 return EINVAL;
5246 }
5247
5248 error = SYSCTL_IN(req, input_val, req->newlen);
5249 if (error) {
5250 return error;
5251 }
5252 input_val[req->newlen] = '\0';
5253
cb323159
A
5254 iter = 0;
5255 error = sscanf(input_val, "%d", &iter);
5256 if (error != 1) {
5257 printf("%s invalid input\n", __func__);
5258 return EINVAL;
5259 }
d9a64523
A
5260
5261 if (iter <= 0) {
5262 printf("%s requested %d iterations, not starting the test\n", __func__, iter);
5263 return EINVAL;
5264 }
5265
5266 lck_mtx_test_init();
5267
5268 buffer_size = 2000;
5269 offset = 0;
f427ee49 5270 buffer = kheap_alloc(KHEAP_TEMP, buffer_size, Z_WAITOK);
0a7de745 5271 if (!buffer) {
d9a64523 5272		panic("Failed to allocate memory for %s\n", __func__);
0a7de745 5273 }
d9a64523
A
5274 memset(buffer, 0, buffer_size);
5275
5276 printf("%s starting uncontended mutex test with %d iterations\n", __func__, iter);
5277
4ba76501 5278 offset = scnprintf(buffer, buffer_size, "STATS INNER LOOP");
d9a64523
A
5279 offset += lck_mtx_test_mtx_uncontended(iter, &buffer[offset], buffer_size - offset);
5280
4ba76501 5281 offset += scnprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
d9a64523
A
5282 offset += lck_mtx_test_mtx_uncontended_loop_time(iter, &buffer[offset], buffer_size - offset);
5283
5284 error = SYSCTL_OUT(req, buffer, offset);
5285
f427ee49 5286 kheap_free(KHEAP_TEMP, buffer, buffer_size);
d9a64523
A
5287 return error;
5288}
5289
5290static int
5291sysctl_test_mtx_contended SYSCTL_HANDLER_ARGS
5292{
5293#pragma unused(oidp, arg1, arg2)
5294 char* buffer;
5295 int buffer_size, offset, error, iter;
5296 char input_val[40];
5297
d9a64523
A
5298 if (!req->newptr) {
5299 return 0;
5300 }
5301
5302 if (!req->oldptr) {
5303 return EINVAL;
5304 }
5305
5306 if (req->newlen >= sizeof(input_val)) {
5307 return EINVAL;
5308 }
5309
5310 error = SYSCTL_IN(req, input_val, req->newlen);
5311 if (error) {
5312 return error;
5313 }
5314 input_val[req->newlen] = '\0';
5315
cb323159
A
5316 iter = 0;
5317 error = sscanf(input_val, "%d", &iter);
5318 if (error != 1) {
5319 printf("%s invalid input\n", __func__);
5320 return EINVAL;
5321 }
d9a64523
A
5322
5323 if (iter <= 0) {
5324 printf("%s requested %d iterations, not starting the test\n", __func__, iter);
5325 return EINVAL;
5326 }
5327
5328 lck_mtx_test_init();
5329
5330 erase_all_test_mtx_stats();
5331
cb323159 5332 buffer_size = 2000;
d9a64523 5333 offset = 0;
f427ee49 5334 buffer = kheap_alloc(KHEAP_TEMP, buffer_size, Z_WAITOK | Z_ZERO);
0a7de745 5335 if (!buffer) {
d9a64523 5336		panic("Failed to allocate memory for %s\n", __func__);
0a7de745 5337 }
d9a64523 5338
cb323159 5339 printf("%s starting contended mutex test with %d iterations FULL_CONTENDED\n", __func__, iter);
d9a64523 5340
4ba76501 5341 offset = scnprintf(buffer, buffer_size, "STATS INNER LOOP");
cb323159
A
5342 offset += lck_mtx_test_mtx_contended(iter, &buffer[offset], buffer_size - offset, FULL_CONTENDED);
5343
5344 printf("%s starting contended mutex loop test with %d iterations FULL_CONTENDED\n", __func__, iter);
5345
4ba76501 5346 offset += scnprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
cb323159
A
5347 offset += lck_mtx_test_mtx_contended_loop_time(iter, &buffer[offset], buffer_size - offset, FULL_CONTENDED);
5348
5349 printf("%s starting contended mutex test with %d iterations HALF_CONTENDED\n", __func__, iter);
5350
4ba76501 5351 offset += scnprintf(&buffer[offset], buffer_size - offset, "STATS INNER LOOP");
cb323159 5352 offset += lck_mtx_test_mtx_contended(iter, &buffer[offset], buffer_size - offset, HALF_CONTENDED);
d9a64523 5353
cb323159 5354 printf("%s starting contended mutex loop test with %d iterations HALF_CONTENDED\n", __func__, iter);
d9a64523 5355
4ba76501 5356 offset += scnprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
cb323159 5357 offset += lck_mtx_test_mtx_contended_loop_time(iter, &buffer[offset], buffer_size - offset, HALF_CONTENDED);
d9a64523
A
5358
5359 error = SYSCTL_OUT(req, buffer, offset);
5360
cb323159 5361 printf("\n%s\n", buffer);
f427ee49 5362 kheap_free(KHEAP_TEMP, buffer, buffer_size);
d9a64523
A
5363
5364 return error;
5365}
5366
f427ee49
A
5367SYSCTL_PROC(_kern, OID_AUTO, erase_all_test_mtx_stats,
5368 CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
0a7de745 5369 0, 0, sysctl_erase_all_test_mtx_stats, "I", "erase test_mtx statistics");
d9a64523 5370
f427ee49
A
5371SYSCTL_PROC(_kern, OID_AUTO, get_test_mtx_stats,
5372 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED,
0a7de745 5373 0, 0, sysctl_get_test_mtx_stats, "A", "get test_mtx statistics");
d9a64523 5374
f427ee49
A
5375SYSCTL_PROC(_kern, OID_AUTO, test_mtx_contended,
5376 CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
0a7de745 5377 0, 0, sysctl_test_mtx_contended, "A", "get statistics for contended mtx test");
d9a64523 5378
f427ee49
A
5379SYSCTL_PROC(_kern, OID_AUTO, test_mtx_uncontended,
5380 CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
0a7de745
A
5381 0, 0, sysctl_test_mtx_uncontended, "A", "get statistics for uncontended mtx test");
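/*
 * Illustrative usage (assumption, not part of this file): both mutex tests
 * take an iteration count as the new (string) value and return the collected
 * statistics as a string, e.g.
 *
 *     sysctl kern.test_mtx_uncontended=1000
 *     sysctl kern.test_mtx_contended=1000
 */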
5382
5383extern uint64_t MutexSpin;
5384
ea3f0419
A
5385SYSCTL_QUAD(_kern, OID_AUTO, mutex_spin_abs, CTLFLAG_RW, &MutexSpin,
5386 "Spin time in abs for acquiring a kernel mutex");
5387
5388extern uint64_t low_MutexSpin;
5389extern int64_t high_MutexSpin;
5390extern unsigned int real_ncpus;
5391
5392SYSCTL_QUAD(_kern, OID_AUTO, low_mutex_spin_abs, CTLFLAG_RW, &low_MutexSpin,
5393 "Low spin threshold in abs for acquiring a kernel mutex");
5394
5395static int
5396sysctl_high_mutex_spin_ns SYSCTL_HANDLER_ARGS
5397{
5398#pragma unused(oidp, arg1, arg2)
5399 int error;
5400 int64_t val = 0;
5401 int64_t res;
5402
5403 /* Check if the user is writing to high_MutexSpin, or just reading it */
5404 if (req->newptr) {
5405 error = SYSCTL_IN(req, &val, sizeof(val));
5406 if (error || (val < 0 && val != -1)) {
5407 return error;
5408 }
5409 high_MutexSpin = val;
5410 }
5411
5412 if (high_MutexSpin >= 0) {
5413 res = high_MutexSpin;
5414 } else {
5415 res = low_MutexSpin * real_ncpus;
5416 }
5417 return SYSCTL_OUT(req, &res, sizeof(res));
5418}
5419SYSCTL_PROC(_kern, OID_AUTO, high_mutex_spin_abs, CTLFLAG_RW | CTLTYPE_QUAD, 0, 0, sysctl_high_mutex_spin_ns, "I",
5420 "High spin threshold in abs for acquiring a kernel mutex");
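/*
 * Illustrative userspace sketch (assumption, not compiled here): reading the
 * effective high spin threshold. A stored value of -1 means the handler above
 * reports low_MutexSpin * real_ncpus instead.
 *
 *     int64_t high_spin_abs = 0;
 *     size_t len = sizeof(high_spin_abs);
 *     if (sysctlbyname("kern.high_mutex_spin_abs", &high_spin_abs, &len, NULL, 0) == 0)
 *             printf("high mutex spin threshold = %lld abs\n", high_spin_abs);
 */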
d9a64523 5421
f427ee49 5422
d9a64523
A
5423#if defined (__x86_64__)
5424
5425semaphore_t sysctl_test_panic_with_thread_sem;
5426
5427#pragma clang diagnostic push
5428#pragma clang diagnostic ignored "-Winfinite-recursion" /* rdar://38801963 */
5429__attribute__((noreturn))
5430static void
5431panic_thread_test_child_spin(void * arg, wait_result_t wres)
5432{
5433 static int panic_thread_recurse_count = 5;
5434
5435 if (panic_thread_recurse_count > 0) {
5436 panic_thread_recurse_count--;
5437 panic_thread_test_child_spin(arg, wres);
5438 }
5439
5440 semaphore_signal(sysctl_test_panic_with_thread_sem);
0a7de745
A
5441 while (1) {
5442 ;
5443 }
d9a64523
A
5444}
5445#pragma clang diagnostic pop
5446
5447static void
5448panic_thread_test_child_park(void * arg __unused, wait_result_t wres __unused)
5449{
5450 int event;
5451
5452 assert_wait(&event, THREAD_UNINT);
5453 semaphore_signal(sysctl_test_panic_with_thread_sem);
5454 thread_block(panic_thread_test_child_park);
5455}
5456
5457static int
5458sysctl_test_panic_with_thread SYSCTL_HANDLER_ARGS
5459{
5460#pragma unused(arg1, arg2)
5461 int rval = 0;
5462 char str[16] = { '\0' };
5463 thread_t child_thread = THREAD_NULL;
5464
5465 rval = sysctl_handle_string(oidp, str, sizeof(str), req);
5466 if (rval != 0 || !req->newptr) {
5467 return EINVAL;
5468 }
5469
5470 semaphore_create(kernel_task, &sysctl_test_panic_with_thread_sem, SYNC_POLICY_FIFO, 0);
5471
5472 /* Create thread to spin or park in continuation */
5473 if (strncmp("spin", str, strlen("spin")) == 0) {
5474 if (kernel_thread_start(panic_thread_test_child_spin, NULL, &child_thread) != KERN_SUCCESS) {
5475 semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
5476 return EBUSY;
5477 }
5478 } else if (strncmp("continuation", str, strlen("continuation")) == 0) {
5479 if (kernel_thread_start(panic_thread_test_child_park, NULL, &child_thread) != KERN_SUCCESS) {
5480 semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
5481 return EBUSY;
5482 }
5483 } else {
5484 semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
5485 return EINVAL;
5486 }
5487
5488 semaphore_wait(sysctl_test_panic_with_thread_sem);
5489
5490 panic_with_thread_context(0, NULL, 0, child_thread, "testing panic_with_thread_context for thread %p", child_thread);
5491
5492 /* Not reached */
5493 return EINVAL;
5494}
5495
f427ee49
A
5496SYSCTL_PROC(_kern, OID_AUTO, test_panic_with_thread,
5497 CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_WR | CTLTYPE_STRING,
0a7de745 5498 0, 0, sysctl_test_panic_with_thread, "A", "test panic flow for backtracing a different thread");
d9a64523 5499#endif /* defined (__x86_64__) */
cb323159 5500
d9a64523 5501#endif /* DEVELOPMENT || DEBUG */
cb323159
A
5502
5503static int
5504sysctl_get_owned_vmobjects SYSCTL_HANDLER_ARGS
5505{
5506#pragma unused(oidp, arg1, arg2)
5507
5508 /* validate */
5509 if (req->newlen != sizeof(mach_port_name_t) || req->newptr == USER_ADDR_NULL ||
5510 req->oldidx != 0 || req->newidx != 0 || req->p == NULL) {
5511 return EINVAL;
5512 }
5513
5514 int error;
5515 mach_port_name_t task_port_name;
5516 task_t task;
bca245ac 5517 size_t buffer_size = (req->oldptr != USER_ADDR_NULL) ? req->oldlen : 0;
c3c9b80d 5518 vmobject_list_output_t buffer = NULL;
cb323159 5519 size_t output_size;
f427ee49 5520 size_t entries;
cb323159 5521
c3c9b80d
A
5522	/* on write ("newptr"), the caller passes in a task port name. */
5523 error = SYSCTL_IN(req, &task_port_name, sizeof(mach_port_name_t));
5524
5525 if (error != 0) {
5526 goto sysctl_get_vmobject_list_exit;
5527 }
5528
5529 task = port_name_to_task_read(task_port_name);
5530 if (task == TASK_NULL) {
5531 error = ESRCH;
5532 goto sysctl_get_vmobject_list_exit;
5533 }
5534
5535 /* get the current size */
5536 task_copy_vmobjects(task, NULL, 0, &entries);
5537 size_t max_size = (entries > 0) ? entries * sizeof(vm_object_query_data_t) + sizeof(*buffer) : 0;
5538
5539	/* if buffer_size is specified, clamp to the current size, then allocate the kernel buffer */
cb323159 5540 if (buffer_size) {
f427ee49 5541 if (buffer_size < sizeof(*buffer) + sizeof(vm_object_query_data_t)) {
c3c9b80d
A
5542 error = ENOMEM;
5543 goto sysctl_get_vmobject_list_deallocate_and_exit;
cb323159
A
5544 }
5545
c3c9b80d 5546 buffer_size = (buffer_size > max_size) ? max_size : buffer_size;
f427ee49 5547 buffer = kheap_alloc(KHEAP_TEMP, buffer_size, Z_WAITOK);
cb323159
A
5548
5549 if (!buffer) {
5550 error = ENOMEM;
c3c9b80d 5551 goto sysctl_get_vmobject_list_deallocate_and_exit;
cb323159
A
5552 }
5553 } else {
5554 buffer = NULL;
5555 }
5556
cb323159
A
5557 /* copy the vmobjects and vmobject data out of the task */
5558 if (buffer_size == 0) {
c3c9b80d 5559 output_size = max_size;
cb323159 5560 } else {
f427ee49
A
5561 task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(*buffer), &entries);
5562 buffer->entries = (uint64_t)entries;
5563 output_size = entries * sizeof(vm_object_query_data_t) + sizeof(*buffer);
cb323159
A
5564 }
5565
cb323159
A
5566 error = SYSCTL_OUT(req, (char*) buffer, output_size);
5567
c3c9b80d
A
5568sysctl_get_vmobject_list_deallocate_and_exit:
5569 task_deallocate(task);
5570
cb323159
A
5571sysctl_get_vmobject_list_exit:
5572 if (buffer) {
f427ee49 5573 kheap_free(KHEAP_TEMP, buffer, buffer_size);
cb323159
A
5574 }
5575
5576 return error;
5577}
5578
f427ee49
A
5579SYSCTL_PROC(_vm, OID_AUTO, get_owned_vmobjects,
5580 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
cb323159 5581 0, 0, sysctl_get_owned_vmobjects, "A", "get owned vmobjects in task");
c3c9b80d
A
5582
5583extern uint64_t num_static_scalable_counters;
5584SYSCTL_QUAD(_kern, OID_AUTO, num_static_scalable_counters, CTLFLAG_RD | CTLFLAG_LOCKED, &num_static_scalable_counters, "");
5585
5586uuid_string_t trial_treatment_id;
5587uuid_string_t trial_experiment_id;
5588int trial_deployment_id = -1;
5589
5590SYSCTL_STRING(_kern, OID_AUTO, trial_treatment_id, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY | CTLFLAG_EXPERIMENT, trial_treatment_id, sizeof(trial_treatment_id), "");
5591SYSCTL_STRING(_kern, OID_AUTO, trial_experiment_id, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY | CTLFLAG_EXPERIMENT, trial_experiment_id, sizeof(trial_experiment_id), "");
5592SYSCTL_INT(_kern, OID_AUTO, trial_deployment_id, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY | CTLFLAG_EXPERIMENT, &trial_deployment_id, 0, "");
5593
5594#if DEVELOPMENT || DEBUG
5595/* For unit testing setting factors & limits. */
5596unsigned int testing_experiment_factor;
5597EXPERIMENT_FACTOR_UINT(_kern, testing_experiment_factor, &testing_experiment_factor, 5, 10, "");
5598#endif /* DEVELOPMENT || DEBUG */