]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_sysctl.c
xnu-4903.231.4.tar.gz
[apple/xnu.git] / bsd / kern / kern_sysctl.c
1 /*
2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
66 /*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
72
73 /*
74 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
84 */
85
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
95 #include <sys/buf.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
98 #include <sys/tty.h>
99 #include <sys/disklabel.h>
100 #include <sys/vm.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105 #include <sys/memory_maintenance.h>
106 #include <sys/priv.h>
107 #include <stdatomic.h>
108
109 #include <security/audit/audit.h>
110 #include <kern/kalloc.h>
111
112 #include <machine/smp.h>
113 #include <mach/machine.h>
114 #include <mach/mach_host.h>
115 #include <mach/mach_types.h>
116 #include <mach/processor_info.h>
117 #include <mach/vm_param.h>
118 #include <kern/debug.h>
119 #include <kern/mach_param.h>
120 #include <kern/task.h>
121 #include <kern/thread.h>
122 #include <kern/thread_group.h>
123 #include <kern/processor.h>
124 #include <kern/cpu_number.h>
125 #include <kern/cpu_quiesce.h>
126 #include <kern/debug.h>
127 #include <kern/sched_prim.h>
128 #include <vm/vm_kern.h>
129 #include <vm/vm_map.h>
130 #include <mach/host_info.h>
131
132 #include <sys/mount_internal.h>
133 #include <sys/kdebug.h>
134
135 #include <IOKit/IOPlatformExpert.h>
136 #include <pexpert/pexpert.h>
137
138 #include <machine/machine_routines.h>
139 #include <machine/exec.h>
140
141 #include <vm/vm_protos.h>
142 #include <vm/vm_pageout.h>
143 #include <vm/vm_compressor_algorithms.h>
144 #include <sys/imgsrc.h>
145 #include <kern/timer_call.h>
146
147 #if defined(__i386__) || defined(__x86_64__)
148 #include <i386/cpuid.h>
149 #endif
150
151 #if CONFIG_FREEZE
152 #include <sys/kern_memorystatus.h>
153 #endif
154
155 #if KPERF
156 #include <kperf/kperf.h>
157 #endif
158
159 #if HYPERVISOR
160 #include <kern/hv_support.h>
161 #endif
162
163 /*
164 * deliberately setting max requests to really high number
165 * so that runaway settings do not cause MALLOC overflows
166 */
167 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
168
169 extern int aio_max_requests;
170 extern int aio_max_requests_per_process;
171 extern int aio_worker_threads;
172 extern int lowpri_IO_window_msecs;
173 extern int lowpri_IO_delay_msecs;
174 extern int nx_enabled;
175 extern int speculative_reads_disabled;
176 extern unsigned int speculative_prefetch_max;
177 extern unsigned int speculative_prefetch_max_iosize;
178 extern unsigned int preheat_max_bytes;
179 extern unsigned int preheat_min_bytes;
180 extern long numvnodes;
181
182 extern uuid_string_t bootsessionuuid_string;
183
184 extern unsigned int vm_max_delayed_work_limit;
185 extern unsigned int vm_max_batch;
186
187 extern unsigned int vm_page_free_min;
188 extern unsigned int vm_page_free_target;
189 extern unsigned int vm_page_free_reserved;
190
191 #if (DEVELOPMENT || DEBUG)
192 extern uint32_t vm_page_creation_throttled_hard;
193 extern uint32_t vm_page_creation_throttled_soft;
194 #endif /* DEVELOPMENT || DEBUG */
195
196 /*
197 * Conditionally allow dtrace to see these functions for debugging purposes.
198 */
199 #ifdef STATIC
200 #undef STATIC
201 #endif
202 #if 0
203 #define STATIC
204 #else
205 #define STATIC static
206 #endif
207
208 extern boolean_t mach_timer_coalescing_enabled;
209
210 extern uint64_t timer_deadline_tracking_bin_1, timer_deadline_tracking_bin_2;
211
212 STATIC void
213 fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
214 STATIC void
215 fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
216 STATIC void
217 fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
218 STATIC void
219 fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
220 STATIC void
221 fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
222 STATIC void
223 fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);
224
225 extern int
226 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
227 #if NFSCLIENT
228 extern int
229 netboot_root(void);
230 #endif
231 int
232 pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
233 proc_t p);
234 int
235 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
236 size_t *sizep, proc_t cur_proc);
237 STATIC int
238 sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
239 proc_t cur_proc, int argc_yes);
240 int
241 sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
242 size_t newlen, void *sp, int len);
243
244 STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
245 STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
246 STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
247 STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
248 STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
249 int sysdoproc_callback(proc_t p, void *arg);
250
251
252 /* forward declarations for non-static STATIC */
253 STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
254 STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
255 STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
256 STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
257 STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
258 STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
259 #if COUNT_SYSCALLS
260 STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
261 #endif /* COUNT_SYSCALLS */
262 #if !CONFIG_EMBEDDED
263 STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
264 #endif /* !CONFIG_EMBEDDED */
265 STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
266 STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
267 STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
268 STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
269 STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
270 STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
271 STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
272 STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
273 STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
274 STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
275 STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
276 STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
277 STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
278 STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
279 STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
280 #if NFSCLIENT
281 STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
282 #endif
283 #ifdef CONFIG_IMGSRC_ACCESS
284 STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
285 #endif
286 STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
287 STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
288 #if CONFIG_COREDUMP
289 STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
290 STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
291 #endif
292 STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
293 STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
294 STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
295 STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
296 STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
297 STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
298 STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
299 STATIC int fetch_process_cputype( proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
300 STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
301 STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
302 STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
303 STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
304 STATIC int sysctl_minimalboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
305 STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
306
307 #ifdef CONFIG_XNUPOST
308 #include <tests/xnupost.h>
309
310 STATIC int sysctl_debug_test_oslog_ctl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
311 STATIC int sysctl_debug_test_stackshot_mutex_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
312 STATIC int sysctl_debug_test_stackshot_rwlck_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
313 #endif
314
315 extern void IORegistrySetOSBuildVersion(char * build_version);
316
317 STATIC void
318 fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
319 {
320 la64->ldavg[0] = la->ldavg[0];
321 la64->ldavg[1] = la->ldavg[1];
322 la64->ldavg[2] = la->ldavg[2];
323 la64->fscale = (user64_long_t)la->fscale;
324 }
325
326 STATIC void
327 fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
328 {
329 la32->ldavg[0] = la->ldavg[0];
330 la32->ldavg[1] = la->ldavg[1];
331 la32->ldavg[2] = la->ldavg[2];
332 la32->fscale = (user32_long_t)la->fscale;
333 }
334
335 #if CONFIG_COREDUMP
336 /*
337 * Attributes stored in the kernel.
338 */
339 extern char corefilename[MAXPATHLEN+1];
340 extern int do_coredump;
341 extern int sugid_coredump;
342 #endif
343
344 #if COUNT_SYSCALLS
345 extern int do_count_syscalls;
346 #endif
347
348 #ifdef INSECURE
349 int securelevel = -1;
350 #else
351 int securelevel;
352 #endif
353
/*
 * kern.threadname handler: read or set the current thread's name.
 *
 * Read path: reports the current name length via req->oldidx (strlen-style,
 * no NUL terminator) and, if a buffer was supplied, copies the name out.
 * Write path: allocates per-thread name storage on first use, then copies
 * the new name in (at most MAXTHREADNAMESIZE - 1 bytes; the bzero keeps the
 * stored name NUL-terminated).
 *
 * Returns 0 on success; ENOMEM if the caller's buffer is too small or the
 * allocation fails; ENAMETOOLONG if the new name is too long; otherwise a
 * copyin/copyout error.
 *
 * NOTE(review): ut->pth_name is read and rewritten without any lock here;
 * a concurrent reader of this thread's name could observe a partially
 * written buffer — presumably tolerated because it is per-thread and
 * advisory. TODO confirm.
 */
STATIC int
sysctl_handle_kern_threadname( __unused struct sysctl_oid *oidp, __unused void *arg1,
	__unused int arg2, struct sysctl_req *req)
{
	int error;
	struct uthread *ut = get_bsdthread_info(current_thread());
	user_addr_t oldp=0, newp=0;
	size_t *oldlenp=NULL;
	size_t newlen=0;

	oldp = req->oldptr;
	oldlenp = &(req->oldlen);
	newp = req->newptr;
	newlen = req->newlen;

	/* We want the current length, and maybe the string itself */
	if(oldlenp) {
		/* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
		size_t currlen = MAXTHREADNAMESIZE - 1;

		if(ut->pth_name)
			/* use length of current thread name */
			currlen = strlen(ut->pth_name);
		if(oldp) {
			/* caller's buffer must hold the whole name */
			if(*oldlenp < currlen)
				return ENOMEM;
			/* NOTE - we do not copy the NULL terminator */
			if(ut->pth_name) {
				error = copyout(ut->pth_name,oldp,currlen);
				if(error)
					return error;
			}
		}
		/* return length of thread name minus NULL terminator (just like strlen) */
		req->oldidx = currlen;
	}

	/* We want to set the name to something */
	if(newp)
	{
		if(newlen > (MAXTHREADNAMESIZE - 1))
			return ENAMETOOLONG;
		if(!ut->pth_name)
		{
			/* first name set for this thread: allocate backing store */
			ut->pth_name = (char*)kalloc( MAXTHREADNAMESIZE );
			if(!ut->pth_name)
				return ENOMEM;
		} else {
			/* trace the name being replaced */
			kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, ut->pth_name);
		}
		/* zero-fill so the copied-in name is always NUL-terminated */
		bzero(ut->pth_name, MAXTHREADNAMESIZE);
		error = copyin(newp, ut->pth_name, newlen);
		if (error) {
			return error;
		}

		kernel_debug_string_simple(TRACE_STRING_THREADNAME, ut->pth_name);
	}

	return 0;
}
415
416 SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname,"A","");
417
418 #define BSD_HOST 1
419 STATIC int
420 sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
421 {
422 host_basic_info_data_t hinfo;
423 kern_return_t kret;
424 uint32_t size;
425 int changed;
426 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
427 struct _processor_statistics_np *buf;
428 int error;
429
430 kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
431 if (kret != KERN_SUCCESS) {
432 return EINVAL;
433 }
434
435 size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */
436
437 if (req->oldlen < size) {
438 return EINVAL;
439 }
440
441 MALLOC(buf, struct _processor_statistics_np*, size, M_TEMP, M_ZERO | M_WAITOK);
442
443 kret = get_sched_statistics(buf, &size);
444 if (kret != KERN_SUCCESS) {
445 error = EINVAL;
446 goto out;
447 }
448
449 error = sysctl_io_opaque(req, buf, size, &changed);
450 if (error) {
451 goto out;
452 }
453
454 if (changed) {
455 panic("Sched info changed?!");
456 }
457 out:
458 FREE(buf, M_TEMP);
459 return error;
460 }
461
462 SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");
463
464 STATIC int
465 sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
466 {
467 boolean_t active;
468 int res;
469
470 if (req->newlen != sizeof(active)) {
471 return EINVAL;
472 }
473
474 res = copyin(req->newptr, &active, sizeof(active));
475 if (res != 0) {
476 return res;
477 }
478
479 return set_sched_stats_active(active);
480 }
481
482 SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
483
484 extern uint32_t sched_debug_flags;
485 SYSCTL_INT(_debug, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_debug_flags, 0, "scheduler debug");
486
487 #if (DEBUG || DEVELOPMENT)
488 extern boolean_t doprnt_hide_pointers;
489 SYSCTL_INT(_debug, OID_AUTO, hide_kernel_pointers, CTLFLAG_RW | CTLFLAG_LOCKED, &doprnt_hide_pointers, 0, "hide kernel pointers from log");
490 #endif
491
492 extern int get_kernel_symfile(proc_t, char **);
493
494 #if COUNT_SYSCALLS
495 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
496
497 extern unsigned int nsysent;
498 extern int syscalls_log[];
499 extern const char *syscallnames[];
500
501 STATIC int
502 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
503 {
504 __unused int cmd = oidp->oid_arg2; /* subcommand*/
505 __unused int *name = arg1; /* oid element argument vector */
506 __unused int namelen = arg2; /* number of oid element arguments */
507 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
508 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
509 user_addr_t newp = req->newptr; /* user buffer copy in address */
510 size_t newlen = req->newlen; /* user buffer copy in size */
511 int error;
512
513 int tmp;
514
515 /* valid values passed in:
516 * = 0 means don't keep called counts for each bsd syscall
517 * > 0 means keep called counts for each bsd syscall
518 * = 2 means dump current counts to the system log
519 * = 3 means reset all counts
520 * for example, to dump current counts:
521 * sysctl -w kern.count_calls=2
522 */
523 error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
524 if ( error != 0 ) {
525 return (error);
526 }
527
528 if ( tmp == 1 ) {
529 do_count_syscalls = 1;
530 }
531 else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
532 int i;
533 for ( i = 0; i < nsysent; i++ ) {
534 if ( syscalls_log[i] != 0 ) {
535 if ( tmp == 2 ) {
536 printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
537 }
538 else {
539 syscalls_log[i] = 0;
540 }
541 }
542 }
543 if ( tmp != 0 ) {
544 do_count_syscalls = 1;
545 }
546 }
547
548 /* adjust index so we return the right required/consumed amount */
549 if (!error)
550 req->oldidx += req->oldlen;
551
552 return (error);
553 }
554 SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
555 0, /* Pointer argument (arg1) */
556 0, /* Integer argument (arg2) */
557 sysctl_docountsyscalls, /* Handler function */
558 NULL, /* Data pointer */
559 "");
560 #endif /* COUNT_SYSCALLS */
561
562 /*
563 * The following sysctl_* functions should not be used
564 * any more, as they can only cope with callers in
565 * user mode: Use new-style
566 * sysctl_io_number()
567 * sysctl_io_string()
568 * sysctl_io_opaque()
569 * instead.
570 */
571
572 /*
573 * Validate parameters and get old / set new parameters
574 * for an integer-valued sysctl function.
575 */
576 int
577 sysctl_int(user_addr_t oldp, size_t *oldlenp,
578 user_addr_t newp, size_t newlen, int *valp)
579 {
580 int error = 0;
581
582 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
583 return (EFAULT);
584 if (oldp && *oldlenp < sizeof(int))
585 return (ENOMEM);
586 if (newp && newlen != sizeof(int))
587 return (EINVAL);
588 *oldlenp = sizeof(int);
589 if (oldp)
590 error = copyout(valp, oldp, sizeof(int));
591 if (error == 0 && newp) {
592 error = copyin(newp, valp, sizeof(int));
593 AUDIT_ARG(value32, *valp);
594 }
595 return (error);
596 }
597
598 /*
599 * Validate parameters and get old / set new parameters
600 * for an quad(64bit)-valued sysctl function.
601 */
602 int
603 sysctl_quad(user_addr_t oldp, size_t *oldlenp,
604 user_addr_t newp, size_t newlen, quad_t *valp)
605 {
606 int error = 0;
607
608 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
609 return (EFAULT);
610 if (oldp && *oldlenp < sizeof(quad_t))
611 return (ENOMEM);
612 if (newp && newlen != sizeof(quad_t))
613 return (EINVAL);
614 *oldlenp = sizeof(quad_t);
615 if (oldp)
616 error = copyout(valp, oldp, sizeof(quad_t));
617 if (error == 0 && newp)
618 error = copyin(newp, valp, sizeof(quad_t));
619 return (error);
620 }
621
622 STATIC int
623 sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
624 {
625 if (p->p_pid != (pid_t)*(int*)arg)
626 return(0);
627 else
628 return(1);
629 }
630
631 STATIC int
632 sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
633 {
634 if (p->p_pgrpid != (pid_t)*(int*)arg)
635 return(0);
636 else
637 return(1);
638 }
639
640 STATIC int
641 sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
642 {
643 int retval;
644 struct tty *tp;
645
646 /* This is very racy but list lock is held.. Hmmm. */
647 if ((p->p_flag & P_CONTROLT) == 0 ||
648 (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
649 (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
650 tp->t_dev != (dev_t)*(int*)arg)
651 retval = 0;
652 else
653 retval = 1;
654
655 return(retval);
656 }
657
658 STATIC int
659 sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
660 {
661 kauth_cred_t my_cred;
662 uid_t uid;
663
664 if (p->p_ucred == NULL)
665 return(0);
666 my_cred = kauth_cred_proc_ref(p);
667 uid = kauth_cred_getuid(my_cred);
668 kauth_cred_unref(&my_cred);
669
670 if (uid != (uid_t)*(int*)arg)
671 return(0);
672 else
673 return(1);
674 }
675
676
677 STATIC int
678 sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
679 {
680 kauth_cred_t my_cred;
681 uid_t ruid;
682
683 if (p->p_ucred == NULL)
684 return(0);
685 my_cred = kauth_cred_proc_ref(p);
686 ruid = kauth_cred_getruid(my_cred);
687 kauth_cred_unref(&my_cred);
688
689 if (ruid != (uid_t)*(int*)arg)
690 return(0);
691 else
692 return(1);
693 }
694
695 /*
696 * try over estimating by 5 procs
697 */
698 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
699 struct sysdoproc_args {
700 int buflen;
701 void *kprocp;
702 boolean_t is_64_bit;
703 user_addr_t dp;
704 size_t needed;
705 int sizeof_kproc;
706 int *errorp;
707 int uidcheck;
708 int ruidcheck;
709 int ttycheck;
710 int uidval;
711 };
712
713 int
714 sysdoproc_callback(proc_t p, void *arg)
715 {
716 struct sysdoproc_args *args = arg;
717
718 if (args->buflen >= args->sizeof_kproc) {
719 if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0))
720 return (PROC_RETURNED);
721 if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0))
722 return (PROC_RETURNED);
723 if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0))
724 return (PROC_RETURNED);
725
726 bzero(args->kprocp, args->sizeof_kproc);
727 if (args->is_64_bit)
728 fill_user64_proc(p, args->kprocp);
729 else
730 fill_user32_proc(p, args->kprocp);
731 int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
732 if (error) {
733 *args->errorp = error;
734 return (PROC_RETURNED_DONE);
735 }
736 args->dp += args->sizeof_kproc;
737 args->buflen -= args->sizeof_kproc;
738 }
739 args->needed += args->sizeof_kproc;
740 return (PROC_RETURNED);
741 }
742
743 SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
/*
 * Common handler for the kern.proc.* nodes (all/pid/tty/pgrp/uid/ruid/lcid).
 *
 * The subcommand is carried in oidp->oid_arg2; name[0], when present, is
 * the subcommand's integer argument (pid, pgrp id, tty dev, uid or ruid).
 * Walks the allproc and zombie lists via proc_iterate(), emitting one
 * kinfo_proc (size-variant on ILP32/LP64) per matching process into the
 * caller's old buffer, or — with a NULL old pointer — reporting the
 * required size (padded by KERN_PROCSLOP) in req->oldlen.
 *
 * Returns EINVAL on a malformed name vector, ENOTSUP for unknown
 * subcommands, ENOMEM if iteration fails or the buffer was too small,
 * or a copyout error propagated from sysdoproc_callback().
 */
STATIC int
sysctl_prochandle SYSCTL_HANDLER_ARGS
{
	int cmd = oidp->oid_arg2;	/* subcommand for multiple nodes */
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t where = req->oldptr;/* user buffer copy out address */

	user_addr_t dp = where;
	size_t needed = 0;
	int buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
	int error = 0;
	boolean_t is_64_bit = proc_is64bit(current_proc());
	struct user32_kinfo_proc user32_kproc;
	struct user64_kinfo_proc user_kproc;
	int sizeof_kproc;
	void *kprocp;
	int (*filterfn)(proc_t, void *) = 0;
	struct sysdoproc_args args;
	int uidcheck = 0;
	int ruidcheck = 0;
	int ttycheck = 0;
	int success = 0;

	/* every subcommand except KERN_PROC_ALL takes exactly one argument */
	if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL))
		return (EINVAL);

	/* pick the kinfo_proc layout matching the caller's address-space size */
	if (is_64_bit) {
		sizeof_kproc = sizeof(user_kproc);
		kprocp = &user_kproc;
	} else {
		sizeof_kproc = sizeof(user32_kproc);
		kprocp = &user32_kproc;
	}

	/*
	 * PID/PGRP filters run inside proc_iterate() as filterfn; the
	 * tty/uid/ruid checks are deferred to sysdoproc_callback() because
	 * they must take credential references.
	 */
	switch (cmd) {

	case KERN_PROC_PID:
		filterfn = sysdoproc_filt_KERN_PROC_PID;
		break;

	case KERN_PROC_PGRP:
		filterfn = sysdoproc_filt_KERN_PROC_PGRP;
		break;

	case KERN_PROC_TTY:
		ttycheck = 1;
		break;

	case KERN_PROC_UID:
		uidcheck = 1;
		break;

	case KERN_PROC_RUID:
		ruidcheck = 1;
		break;

	case KERN_PROC_ALL:
		break;

	default:
		/* must be kern.proc.<unknown> */
		return (ENOTSUP);
	}

	error = 0;
	args.buflen = buflen;
	args.kprocp = kprocp;
	args.is_64_bit = is_64_bit;
	args.dp = dp;
	args.needed = needed;
	args.errorp = &error;
	args.uidcheck = uidcheck;
	args.ruidcheck = ruidcheck;
	args.ttycheck = ttycheck;
	args.sizeof_kproc = sizeof_kproc;
	if (namelen)
		args.uidval = name[0];

	success = proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
	    sysdoproc_callback, &args, filterfn, name);

	/*
	 * rdar://problem/28433391: if we can't iterate over the processes,
	 * make sure to return an error.
	 */

	if (success != 0)
		return (ENOMEM);

	if (error)
		return (error);

	dp = args.dp;
	needed = args.needed;

	if (where != USER_ADDR_NULL) {
		/* report how much we actually wrote; fail if any entry was skipped */
		req->oldlen = dp - where;
		if (needed > req->oldlen)
			return (ENOMEM);
	} else {
		/* size probe: over-estimate to absorb processes created meanwhile */
		needed += KERN_PROCSLOP;
		req->oldlen = needed;
	}
	/* adjust index so we return the right required/consumed amount */
	req->oldidx += req->oldlen;
	return (0);
}
852
853 /*
854 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
855 * in the sysctl declaration itself, which comes into the handler function
856 * as 'oidp->oid_arg2'.
857 *
858 * For these particular sysctls, since they have well known OIDs, we could
859 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
860 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
861 * of a well known value with a common handler function. This is desirable,
862 * because we want well known values to "go away" at some future date.
863 *
864 * It should be noted that the value of '((int *)arg1)[1]' is used for many
865 * an integer parameter to the subcommand for many of these sysctls; we'd
866 * rather have used '((int *)arg1)[0]' for that, or even better, an element
867 * in a structure passed in as the the 'newp' argument to sysctlbyname(3),
868 * and then use leaf-node permissions enforcement, but that would have
869 * necessitated modifying user space code to correspond to the interface
870 * change, and we are striving for binary backward compatibility here; even
871 * though these are SPI, and not intended for use by user space applications
872 * which are not themselves system tools or libraries, some applications
873 * have erroneously used them.
874 */
875 SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
876 0, /* Pointer argument (arg1) */
877 KERN_PROC_ALL, /* Integer argument (arg2) */
878 sysctl_prochandle, /* Handler function */
879 NULL, /* Data is size variant on ILP32/LP64 */
880 "");
881 SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
882 0, /* Pointer argument (arg1) */
883 KERN_PROC_PID, /* Integer argument (arg2) */
884 sysctl_prochandle, /* Handler function */
885 NULL, /* Data is size variant on ILP32/LP64 */
886 "");
887 SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
888 0, /* Pointer argument (arg1) */
889 KERN_PROC_TTY, /* Integer argument (arg2) */
890 sysctl_prochandle, /* Handler function */
891 NULL, /* Data is size variant on ILP32/LP64 */
892 "");
893 SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
894 0, /* Pointer argument (arg1) */
895 KERN_PROC_PGRP, /* Integer argument (arg2) */
896 sysctl_prochandle, /* Handler function */
897 NULL, /* Data is size variant on ILP32/LP64 */
898 "");
899 SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
900 0, /* Pointer argument (arg1) */
901 KERN_PROC_UID, /* Integer argument (arg2) */
902 sysctl_prochandle, /* Handler function */
903 NULL, /* Data is size variant on ILP32/LP64 */
904 "");
905 SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
906 0, /* Pointer argument (arg1) */
907 KERN_PROC_RUID, /* Integer argument (arg2) */
908 sysctl_prochandle, /* Handler function */
909 NULL, /* Data is size variant on ILP32/LP64 */
910 "");
911 SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
912 0, /* Pointer argument (arg1) */
913 KERN_PROC_LCID, /* Integer argument (arg2) */
914 sysctl_prochandle, /* Handler function */
915 NULL, /* Data is size variant on ILP32/LP64 */
916 "");
917
918
919 /*
920 * Fill in non-zero fields of an eproc structure for the specified process.
921 */
/*
 * Fill in non-zero fields of an eproc structure for the specified process.
 *
 * Populates the ILP32 user-visible "extra" process info: process group,
 * session/controlling-tty details, and a fake historical pcred/ucred built
 * from the process's kauth credential. The caller is expected to have
 * zeroed *ep (sysdoproc_callback bzero()s it); fields with no source stay 0.
 * Takes and releases its own pgrp/session/credential references.
 */
STATIC void
fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
{
	struct tty *tp;
	struct pgrp *pg;
	struct session *sessp;
	kauth_cred_t my_cred;

	/* referenced; must be released below */
	pg = proc_pgrp(p);
	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	}
	ep->e_ppid = p->p_ppid;
	if (p->p_ucred) {
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
		ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
		ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
		ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
		bcopy(posix_cred_get(my_cred)->cr_groups,
		    ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));

		kauth_cred_unref(&my_cred);
	}

	/* controlling terminal, if the session has one */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	    (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
	} else
		ep->e_tdev = NODEV;

	if (sessp != SESSION_NULL) {
		if (SESS_LEADER(p, sessp))
			ep->e_flag |= EPROC_SLEADER;
		/* drop the session reference taken above */
		session_rele(sessp);
	}
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
974
975 /*
976 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
977 */
978 STATIC void
979 fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
980 {
981 struct tty *tp;
982 struct pgrp *pg;
983 struct session *sessp;
984 kauth_cred_t my_cred;
985
986 pg = proc_pgrp(p);
987 sessp = proc_session(p);
988
989 if (pg != PGRP_NULL) {
990 ep->e_pgid = p->p_pgrpid;
991 ep->e_jobc = pg->pg_jobc;
992 if (sessp != SESSION_NULL && sessp->s_ttyvp)
993 ep->e_flag = EPROC_CTTY;
994 }
995 ep->e_ppid = p->p_ppid;
996 if (p->p_ucred) {
997 my_cred = kauth_cred_proc_ref(p);
998
999 /* A fake historical pcred */
1000 ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
1001 ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
1002 ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
1003 ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);
1004
1005 /* A fake historical *kauth_cred_t */
1006 ep->e_ucred.cr_ref = my_cred->cr_ref;
1007 ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
1008 ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
1009 bcopy(posix_cred_get(my_cred)->cr_groups,
1010 ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));
1011
1012 kauth_cred_unref(&my_cred);
1013 }
1014
1015 if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
1016 (tp = SESSION_TP(sessp))) {
1017 ep->e_tdev = tp->t_dev;
1018 ep->e_tpgid = sessp->s_ttypgrpid;
1019 } else
1020 ep->e_tdev = NODEV;
1021
1022 if (sessp != SESSION_NULL) {
1023 if (SESS_LEADER(p, sessp))
1024 ep->e_flag |= EPROC_SLEADER;
1025 session_rele(sessp);
1026 }
1027 if (pg != PGRP_NULL)
1028 pg_rele(pg);
1029 }
1030
1031 /*
1032 * Fill in an eproc structure for the specified process.
1033 * bzeroed by our caller, so only set non-zero fields.
1034 */
1035 STATIC void
1036 fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
1037 {
1038 exp->p_starttime.tv_sec = p->p_start.tv_sec;
1039 exp->p_starttime.tv_usec = p->p_start.tv_usec;
1040 exp->p_flag = p->p_flag;
1041 if (p->p_lflag & P_LTRACED)
1042 exp->p_flag |= P_TRACED;
1043 if (p->p_lflag & P_LPPWAIT)
1044 exp->p_flag |= P_PPWAIT;
1045 if (p->p_lflag & P_LEXIT)
1046 exp->p_flag |= P_WEXIT;
1047 exp->p_stat = p->p_stat;
1048 exp->p_pid = p->p_pid;
1049 exp->p_oppid = p->p_oppid;
1050 /* Mach related */
1051 exp->user_stack = p->user_stack;
1052 exp->p_debugger = p->p_debugger;
1053 exp->sigwait = p->sigwait;
1054 /* scheduling */
1055 #ifdef _PROC_HAS_SCHEDINFO_
1056 exp->p_estcpu = p->p_estcpu;
1057 exp->p_pctcpu = p->p_pctcpu;
1058 exp->p_slptime = p->p_slptime;
1059 #endif
1060 exp->p_realtimer.it_interval.tv_sec =
1061 (user32_time_t)p->p_realtimer.it_interval.tv_sec;
1062 exp->p_realtimer.it_interval.tv_usec =
1063 (__int32_t)p->p_realtimer.it_interval.tv_usec;
1064
1065 exp->p_realtimer.it_value.tv_sec =
1066 (user32_time_t)p->p_realtimer.it_value.tv_sec;
1067 exp->p_realtimer.it_value.tv_usec =
1068 (__int32_t)p->p_realtimer.it_value.tv_usec;
1069
1070 exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
1071 exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;
1072
1073 exp->p_sigignore = p->p_sigignore;
1074 exp->p_sigcatch = p->p_sigcatch;
1075 exp->p_priority = p->p_priority;
1076 exp->p_nice = p->p_nice;
1077 bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
1078 exp->p_xstat = p->p_xstat;
1079 exp->p_acflag = p->p_acflag;
1080 }
1081
1082 /*
1083 * Fill in an LP64 version of extern_proc structure for the specified process.
1084 */
1085 STATIC void
1086 fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
1087 {
1088 exp->p_starttime.tv_sec = p->p_start.tv_sec;
1089 exp->p_starttime.tv_usec = p->p_start.tv_usec;
1090 exp->p_flag = p->p_flag;
1091 if (p->p_lflag & P_LTRACED)
1092 exp->p_flag |= P_TRACED;
1093 if (p->p_lflag & P_LPPWAIT)
1094 exp->p_flag |= P_PPWAIT;
1095 if (p->p_lflag & P_LEXIT)
1096 exp->p_flag |= P_WEXIT;
1097 exp->p_stat = p->p_stat;
1098 exp->p_pid = p->p_pid;
1099 exp->p_oppid = p->p_oppid;
1100 /* Mach related */
1101 exp->user_stack = p->user_stack;
1102 exp->p_debugger = p->p_debugger;
1103 exp->sigwait = p->sigwait;
1104 /* scheduling */
1105 #ifdef _PROC_HAS_SCHEDINFO_
1106 exp->p_estcpu = p->p_estcpu;
1107 exp->p_pctcpu = p->p_pctcpu;
1108 exp->p_slptime = p->p_slptime;
1109 #endif
1110 exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
1111 exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
1112
1113 exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
1114 exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
1115
1116 exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
1117 exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
1118
1119 exp->p_sigignore = p->p_sigignore;
1120 exp->p_sigcatch = p->p_sigcatch;
1121 exp->p_priority = p->p_priority;
1122 exp->p_nice = p->p_nice;
1123 bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
1124 exp->p_xstat = p->p_xstat;
1125 exp->p_acflag = p->p_acflag;
1126 }
1127
1128 STATIC void
1129 fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
1130 {
1131 /* on a 64 bit kernel, 32 bit users get some truncated information */
1132 fill_user32_externproc(p, &kp->kp_proc);
1133 fill_user32_eproc(p, &kp->kp_eproc);
1134 }
1135
1136 STATIC void
1137 fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
1138 {
1139 fill_user64_externproc(p, &kp->kp_proc);
1140 fill_user64_eproc(p, &kp->kp_eproc);
1141 }
1142
1143 STATIC int
1144 sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
1145 {
1146 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1147 int *name = arg1; /* oid element argument vector */
1148 int namelen = arg2; /* number of oid element arguments */
1149 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1150 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1151 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1152 // size_t newlen = req->newlen; /* user buffer copy in size */
1153
1154 int ret=0;
1155
1156 if (namelen == 0)
1157 return(ENOTSUP);
1158
1159 switch(name[0]) {
1160 case KERN_KDEFLAGS:
1161 case KERN_KDDFLAGS:
1162 case KERN_KDENABLE:
1163 case KERN_KDGETBUF:
1164 case KERN_KDSETUP:
1165 case KERN_KDREMOVE:
1166 case KERN_KDSETREG:
1167 case KERN_KDGETREG:
1168 case KERN_KDREADTR:
1169 case KERN_KDWRITETR:
1170 case KERN_KDWRITEMAP:
1171 case KERN_KDTEST:
1172 case KERN_KDPIDTR:
1173 case KERN_KDTHRMAP:
1174 case KERN_KDPIDEX:
1175 case KERN_KDSETBUF:
1176 case KERN_KDREADCURTHRMAP:
1177 case KERN_KDSET_TYPEFILTER:
1178 case KERN_KDBUFWAIT:
1179 case KERN_KDCPUMAP:
1180 case KERN_KDWRITEMAP_V3:
1181 case KERN_KDWRITETR_V3:
1182 ret = kdbg_control(name, namelen, oldp, oldlenp);
1183 break;
1184 default:
1185 ret= ENOTSUP;
1186 break;
1187 }
1188
1189 /* adjust index so we return the right required/consumed amount */
1190 if (!ret)
1191 req->oldidx += req->oldlen;
1192
1193 return (ret);
1194 }
/* kern.kdebug: node-style OID; remaining name elements select the subcommand. */
SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_kdebug_ops,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1201
1202
#if !CONFIG_EMBEDDED
/*
 * Return the top *sizep bytes of the user stack, or the entire area of the
 * user stack down through the saved exec_path, whichever is smaller.
 */
STATIC int
sysctl_doprocargs SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
//	size_t newlen = req->newlen;	/* user buffer copy in size */
	int error;

	/* argc_yes == 0 selects the legacy KERN_PROCARGS layout (no argc word) */
	error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 0);

	/* adjust index so we return the right required/consumed amount */
	if (!error)
		req->oldidx += req->oldlen;

	return (error);
}
SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_doprocargs,	/* Handler function */
	NULL,			/* Data pointer */
	"");
#endif	/* !CONFIG_EMBEDDED */
1235
1236 STATIC int
1237 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1238 {
1239 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1240 int *name = arg1; /* oid element argument vector */
1241 int namelen = arg2; /* number of oid element arguments */
1242 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1243 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1244 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1245 // size_t newlen = req->newlen; /* user buffer copy in size */
1246 int error;
1247
1248 error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 1);
1249
1250 /* adjust index so we return the right required/consumed amount */
1251 if (!error)
1252 req->oldidx += req->oldlen;
1253
1254 return (error);
1255 }
/* kern.procargs2: argc-prefixed variant; name element 0 is the target pid. */
SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_doprocargs2,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1262
/*
 * sysctl_procargsx
 *
 * Shared backend for KERN_PROCARGS (argc_yes == 0) and KERN_PROCARGS2
 * (argc_yes == 1).  Copies the saved argument/environment area from the
 * top of the target process' user stack into the caller's buffer.
 *
 * name[0] is the target pid; 'where'/'sizep' are the sysctl old-value
 * buffer and size; 'cur_proc' is the requesting process (used for the
 * privilege check).  When 'where' is NULL, only the required length is
 * returned through *sizep.
 */
STATIC int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
                 size_t *sizep, proc_t cur_proc, int argc_yes)
{
	proc_t p;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	int error = 0;
	struct _vm_map *proc_map;
	struct task * task;
	vm_map_copy_t	tmp;
	user_addr_t	arg_addr;
	size_t		arg_size;
	caddr_t data;
	size_t argslen=0;
	int size;
	vm_size_t alloc_size = 0;
	vm_offset_t	copy_start, copy_end;
	kern_return_t ret;
	int pid;
	kauth_cred_t my_cred;
	uid_t uid;
	int argc = -1;

	if ( namelen < 1 )
		return(EINVAL);

	if (argc_yes)
		buflen -= sizeof(int);		/* reserve first word to return argc */

	/* we only care about buflen when where (oldp from sysctl) is not NULL. */
	/* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
	/* is not NULL then the caller wants us to return the length needed to */
	/* hold the data we would return */
	if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
		return(EINVAL);
	}
	arg_size = buflen;

	/*
	 * Lookup process by pid
	 */
	pid = name[0];
	p = proc_find(pid);		/* takes a proc reference */
	if (p == NULL) {
		return(EINVAL);
	}

	/*
	 * Copy the top N bytes of the stack.
	 * On all machines we have so far, the stack grows
	 * downwards.
	 *
	 * If the user expects no more than N bytes of
	 * argument list, use that as a guess for the
	 * size.
	 */

	if (!p->user_stack) {
		/* no recorded user stack (e.g. not fully exec'd); nothing to copy */
		proc_rele(p);
		return(EINVAL);
	}

	if (where == USER_ADDR_NULL) {
		/* caller only wants to know length of proc args data */
		if (sizep == NULL) {
			proc_rele(p);
			return(EFAULT);
		}

		size = p->p_argslen;
		proc_rele(p);
		if (argc_yes) {
			/* PROCARGS2: just the leading argc word on top */
			size += sizeof(int);
		} else {
			/*
			 * old PROCARGS will return the executable's path and plus some
			 * extra space for work alignment and data tags
			 */
			size += PATH_MAX + (6 * sizeof(int));
		}
		/* round the reported size up to a word boundary */
		size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
		*sizep = size;
		return (0);
	}

	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);

	/* same-uid callers may read; otherwise superuser is required */
	if ((uid != kauth_cred_getuid(kauth_cred_get()))
		&& suser(kauth_cred_get(), &cur_proc->p_acflag)) {
		proc_rele(p);
		return (EINVAL);
	}

	/* never copy more than the saved args area actually holds */
	if ((u_int)arg_size > p->p_argslen)
	        arg_size = round_page(p->p_argslen);

	arg_addr = p->user_stack - arg_size;

	/*
	 * Before we can block (any VM code), make another
	 * reference to the map to keep it alive.  We do
	 * that by getting a reference on the task itself.
	 */
	task = p->task;
	if (task == NULL) {
		proc_rele(p);
		return(EINVAL);
	}

	/* save off argc before releasing the proc */
	argc = p->p_argc;

	argslen = p->p_argslen;
	/*
	 * Once we have a task reference we can convert that into a
	 * map reference, which we will use in the calls below.  The
	 * task/process may change its map after we take this reference
	 * (see execve), but the worst that will happen then is a return
	 * of stale info (which is always a possibility).
	 */
	task_reference(task);
	proc_rele(p);
	proc_map = get_task_map_reference(task);
	task_deallocate(task);

	if (proc_map == NULL)
		return(EINVAL);

	/* kernel-side staging buffer for the copied argument area */
	alloc_size = round_page(arg_size);
	ret = kmem_alloc(kernel_map, &copy_start, alloc_size, VM_KERN_MEMORY_BSD);
	if (ret != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);
		return(ENOMEM);
	}
	bzero((void *)copy_start, alloc_size);

	copy_end = round_page(copy_start + arg_size);

	if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
			  (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
			vm_map_deallocate(proc_map);
			kmem_free(kernel_map, copy_start,
					round_page(arg_size));
			return (EIO);
	}

	/*
	 *	Now that we've done the copyin from the process'
	 *	map, we can release the reference to it.
	 */
	vm_map_deallocate(proc_map);

	if( vm_map_copy_overwrite(kernel_map,
				  (vm_map_address_t)copy_start,
				  tmp, FALSE) != KERN_SUCCESS) {
			kmem_free(kernel_map, copy_start,
					round_page(arg_size));
			vm_map_copy_discard(tmp);
			return (EIO);
	}

	/* args live at the very end of the copied region (stack grows down) */
	if (arg_size > argslen) {
		data = (caddr_t) (copy_end - argslen);
		size = argslen;
	} else {
		data = (caddr_t) (copy_end - arg_size);
		size = arg_size;
	}

	/*
	 * When these sysctls were introduced, the first string in the strings
	 * section was just the bare path of the executable.  However, for security
	 * reasons we now prefix this string with executable_path= so it can be
	 * parsed getenv style.  To avoid binary compatability issues with exising
	 * callers of this sysctl, we strip it off here if present.
	 * (rdar://problem/13746466)
	 */
#define        EXECUTABLE_KEY "executable_path="
	if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0){
		data += strlen(EXECUTABLE_KEY);
		size -= strlen(EXECUTABLE_KEY);
	}

	if (argc_yes) {
		/* Put processes argc as the first word in the copyout buffer */
		suword(where, argc);
		error = copyout(data, (where + sizeof(int)), size);
		size += sizeof(int);
	} else {
		error = copyout(data, where, size);

		/*
		 * Make the old PROCARGS work to return the executable's path
		 * But, only if there is enough space in the provided buffer
		 *
		 * on entry: data [possibily] points to the beginning of the path
		 *
		 * Note: we keep all pointers&sizes aligned to word boundries
		 */
		if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
		{
			int binPath_sz, alignedBinPath_sz = 0;
			int extraSpaceNeeded, addThis;
			user_addr_t placeHere;
			char * str = (char *) data;
			int max_len = size;

			/* Some apps are really bad about messing up their stacks
			   So, we have to be extra careful about getting the length
			   of the executing binary.  If we encounter an error, we bail.
			*/

			/* Limit ourselves to PATH_MAX paths */
			if ( max_len > PATH_MAX ) max_len = PATH_MAX;

			binPath_sz = 0;

			/* measure the NUL-terminated path at the front of the data */
			while ( (binPath_sz < max_len-1) && (*str++ != 0) )
				binPath_sz++;

			/* If we have a NUL terminator, copy it, too */
			if (binPath_sz < max_len-1) binPath_sz += 1;

			/* Pre-Flight the space requiremnts */

			/* Account for the padding that fills out binPath to the next word */
			alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;

			placeHere = where + size;

			/* Account for the bytes needed to keep placeHere word aligned */
			addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;

			/* Add up all the space that is needed */
			extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

			/* is there is room to tack on argv[0]? */
			if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
			{
				placeHere += addThis;
				suword(placeHere, 0);
				placeHere += sizeof(int);
				suword(placeHere, 0xBFFF0000);
				placeHere += sizeof(int);
				suword(placeHere, 0);
				placeHere += sizeof(int);
				error = copyout(data, placeHere, binPath_sz);
				if ( ! error )
				{
					placeHere += binPath_sz;
					suword(placeHere, 0);
					size += extraSpaceNeeded;
				}
			}
		}
	}

	/* release the kernel staging buffer */
	if (copy_start != (vm_offset_t) 0) {
		kmem_free(kernel_map, copy_start, copy_end - copy_start);
	}
	if (error) {
		return(error);
	}

	if (where != USER_ADDR_NULL)
		*sizep = size;
	return (0);
}
1533
1534
1535 /*
1536 * Max number of concurrent aio requests
1537 */
1538 STATIC int
1539 sysctl_aiomax
1540 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1541 {
1542 int new_value, changed;
1543 int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
1544 if (changed) {
1545 /* make sure the system-wide limit is greater than the per process limit */
1546 if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS)
1547 aio_max_requests = new_value;
1548 else
1549 error = EINVAL;
1550 }
1551 return(error);
1552 }
1553
1554
1555 /*
1556 * Max number of concurrent aio requests per process
1557 */
1558 STATIC int
1559 sysctl_aioprocmax
1560 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1561 {
1562 int new_value, changed;
1563 int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
1564 if (changed) {
1565 /* make sure per process limit is less than the system-wide limit */
1566 if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
1567 aio_max_requests_per_process = new_value;
1568 else
1569 error = EINVAL;
1570 }
1571 return(error);
1572 }
1573
1574
1575 /*
1576 * Max number of async IO worker threads
1577 */
1578 STATIC int
1579 sysctl_aiothreads
1580 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1581 {
1582 int new_value, changed;
1583 int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
1584 if (changed) {
1585 /* we only allow an increase in the number of worker threads */
1586 if (new_value > aio_worker_threads ) {
1587 _aio_create_worker_threads((new_value - aio_worker_threads));
1588 aio_worker_threads = new_value;
1589 }
1590 else
1591 error = EINVAL;
1592 }
1593 return(error);
1594 }
1595
1596
1597 /*
1598 * System-wide limit on the max number of processes
1599 */
1600 STATIC int
1601 sysctl_maxproc
1602 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1603 {
1604 int new_value, changed;
1605 int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
1606 if (changed) {
1607 AUDIT_ARG(value32, new_value);
1608 /* make sure the system-wide limit is less than the configured hard
1609 limit set at kernel compilation */
1610 if (new_value <= hard_maxproc && new_value > 0)
1611 maxproc = new_value;
1612 else
1613 error = EINVAL;
1614 }
1615 return(error);
1616 }
1617
/* Read-only kernel identity values exported under the kern node. */
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		version, 0, "");
SYSCTL_STRING(_kern, OID_AUTO, uuid,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&kernel_uuid_string[0], 0, "");

SYSCTL_STRING(_kern, OID_AUTO, osbuildconfig,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_MASKED,
		&osbuild_config[0], 0, "");

/* kprintf syscall tracing is always compiled in on DEBUG kernels. */
#if DEBUG
#ifndef DKPR
#define DKPR 1
#endif
#endif

#if DKPR
int debug_kprint_syscall = 0;
char debug_kprint_syscall_process[MAXCOMLEN+1];

/* Thread safe: bits and string value are not used to reclaim state */
SYSCTL_INT (_debug, OID_AUTO, kprint_syscall,
	   CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
	      CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
	      "name of process for kprintf syscall tracing");
1654
1655 int debug_kprint_current_process(const char **namep)
1656 {
1657 struct proc *p = current_proc();
1658
1659 if (p == NULL) {
1660 return 0;
1661 }
1662
1663 if (debug_kprint_syscall_process[0]) {
1664 /* user asked to scope tracing to a particular process name */
1665 if(0 == strncmp(debug_kprint_syscall_process,
1666 p->p_comm, sizeof(debug_kprint_syscall_process))) {
1667 /* no value in telling the user that we traced what they asked */
1668 if(namep) *namep = NULL;
1669
1670 return 1;
1671 } else {
1672 return 0;
1673 }
1674 }
1675
1676 /* trace all processes. Tell user what we traced */
1677 if (namep) {
1678 *namep = p->p_comm;
1679 }
1680
1681 return 1;
1682 }
1683 #endif
1684
1685 /* PR-5293665: need to use a callback function for kern.osversion to set
1686 * osversion in IORegistry */
1687
1688 STATIC int
1689 sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1690 {
1691 int rval = 0;
1692
1693 rval = sysctl_handle_string(oidp, arg1, arg2, req);
1694
1695 if (req->newptr) {
1696 IORegistrySetOSBuildVersion((char *)arg1);
1697 }
1698
1699 return rval;
1700 }
1701
SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
        CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
        osversion, 256 /* OSVERSIZE*/,
        sysctl_osversion, "A", "");

/*
 * NOTE(review): declared as uint64_t[48] (384 bytes) but used as a character
 * string by sysctl_handle_string / the '\0' check below — presumably sized
 * this way for alignment; confirm before changing the type.
 */
static uint64_t osproductversion_string[48];
1708
1709 STATIC int
1710 sysctl_osproductversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1711 {
1712 if (req->newptr != 0) {
1713 /*
1714 * Can only ever be set by launchd, and only once at boot.
1715 */
1716 if (req->p->p_pid != 1 || osproductversion_string[0] != '\0') {
1717 return EPERM;
1718 }
1719 }
1720
1721 return sysctl_handle_string(oidp, arg1, arg2, req);
1722 }
1723
SYSCTL_PROC(_kern, OID_AUTO, osproductversion,
        CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
        osproductversion_string, sizeof(osproductversion_string),
        sysctl_osproductversion, "A", "The ProductVersion from SystemVersion.plist");

/* Opaque OS-variant flags; zero until launchd performs its one-time write. */
static uint64_t osvariant_status = 0;
1730
1731 STATIC int
1732 sysctl_osvariant_status(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1733 {
1734 if (req->newptr != 0) {
1735 /*
1736 * Can only ever be set by launchd, and only once at boot.
1737 */
1738 if (req->p->p_pid != 1 || osvariant_status != 0) {
1739 return EPERM;
1740 }
1741 }
1742
1743 return sysctl_handle_quad(oidp, arg1, arg2, req);
1744 }
1745
/* Masked so it does not appear in default sysctl listings. */
SYSCTL_PROC(_kern, OID_AUTO, osvariant_status,
        CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
        &osvariant_status, sizeof(osvariant_status),
        sysctl_osvariant_status, "Q", "Opaque flags used to cache OS variant information");
1750
1751 STATIC int
1752 sysctl_sysctl_bootargs
1753 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1754 {
1755 int error;
1756 /* BOOT_LINE_LENGTH */
1757 #if CONFIG_EMBEDDED
1758 size_t boot_args_len = 256;
1759 #else
1760 size_t boot_args_len = 1024;
1761 #endif
1762 char buf[boot_args_len];
1763
1764 strlcpy(buf, PE_boot_args(), boot_args_len);
1765 error = sysctl_io_string(req, buf, boot_args_len, 0, NULL);
1766 return(error);
1767 }
1768
/* No backing storage: the handler builds the string on each read. */
SYSCTL_PROC(_kern, OID_AUTO, bootargs,
	CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
	NULL, 0,
	sysctl_sysctl_bootargs, "A", "bootargs");
1773
1774 STATIC int
1775 sysctl_kernelcacheuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1776 {
1777 int rval = ENOENT;
1778 if (kernelcache_uuid_valid) {
1779 rval = sysctl_handle_string(oidp, arg1, arg2, req);
1780 }
1781 return rval;
1782 }
1783
SYSCTL_PROC(_kern, OID_AUTO, kernelcacheuuid,
        CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
        kernelcache_uuid_string, sizeof(kernelcache_uuid_string),
        sysctl_kernelcacheuuid, "A", "");

/* Tunable and read-only limits / counters exported under the kern node. */
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, 1, "");
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		NULL, 0, "");
#endif
SYSCTL_INT(_kern, OID_AUTO, num_files,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&nfiles, 0, "");
SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&numvnodes, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_tasks,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&task_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_threads,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&thread_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&task_threadmax, 0, "");
1828
1829 STATIC int
1830 sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1831 {
1832 int oldval = desiredvnodes;
1833 int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
1834
1835 if (oldval != desiredvnodes) {
1836 resize_namecache(desiredvnodes);
1837 }
1838
1839 return(error);
1840 }
1841
SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
		CTLFLAG_RW | CTLFLAG_LOCKED,
		&nc_disabled, 0, "");

/* Writable limits that need validation, routed through handler functions. */
SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_maxvnodes, "I", "");

SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aiothreads, "I", "");

/* Scheduler knobs and perfcontrol statistics; development/debug builds only. */
#if (DEVELOPMENT || DEBUG)
extern int sched_smt_balance;
SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance,
	       CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
	       &sched_smt_balance, 0, "");
extern int sched_allow_rt_smt;
SYSCTL_INT(_kern, OID_AUTO, sched_allow_rt_smt,
	       CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
	       &sched_allow_rt_smt, 0, "");
#if __arm__ || __arm64__
extern uint32_t perfcontrol_requested_recommended_cores;
SYSCTL_UINT(_kern, OID_AUTO, sched_recommended_cores,
		CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
		&perfcontrol_requested_recommended_cores, 0, "");

/* Scheduler perfcontrol callouts sysctls */
SYSCTL_DECL(_kern_perfcontrol_callout);
SYSCTL_NODE(_kern, OID_AUTO, perfcontrol_callout, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
	"scheduler perfcontrol callouts");

extern int perfcontrol_callout_stats_enabled;
SYSCTL_INT(_kern_perfcontrol_callout, OID_AUTO, stats_enabled,
	       CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
	       &perfcontrol_callout_stats_enabled, 0, "");

extern uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
	perfcontrol_callout_stat_t stat);
1893
1894 /* On-Core Callout */
1895 STATIC int
1896 sysctl_perfcontrol_callout_stat
1897 (__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1898 {
1899 perfcontrol_callout_stat_t stat = (perfcontrol_callout_stat_t)arg1;
1900 perfcontrol_callout_type_t type = (perfcontrol_callout_type_t)arg2;
1901 return sysctl_io_number(req, (int)perfcontrol_callout_stat_avg(type, stat),
1902 sizeof(int), NULL, NULL);
1903 }
1904
/* One OID per (callout type, statistic) pair; arg1/arg2 select the pair. */
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_instr,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_ON_CORE,
		sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_cycles,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_ON_CORE,
		sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_instr,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_OFF_CORE,
		sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_cycles,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_OFF_CORE,
		sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_instr,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_CONTEXT,
		sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_cycles,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_CONTEXT,
		sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_instr,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_STATE_UPDATE,
		sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_cycles,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_STATE_UPDATE,
		sysctl_perfcontrol_callout_stat, "I", "");

#endif /* __arm__ || __arm64__ */
#endif /* (DEVELOPMENT || DEBUG) */
1940
1941 STATIC int
1942 sysctl_securelvl
1943 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1944 {
1945 int new_value, changed;
1946 int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
1947 if (changed) {
1948 if (!(new_value < securelevel && req->p->p_pid != 1)) {
1949 proc_list_lock();
1950 securelevel = new_value;
1951 proc_list_unlock();
1952 } else {
1953 error = EPERM;
1954 }
1955 }
1956 return(error);
1957 }
1958
/* kern.securelevel: BSD secure level (see sysctl_securelvl for the policy). */
SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_securelvl, "I", "");
1962
1963
1964 STATIC int
1965 sysctl_domainname
1966 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1967 {
1968 int error, changed;
1969 error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
1970 if (changed) {
1971 domainnamelen = strlen(domainname);
1972 }
1973 return(error);
1974 }
1975
SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_domainname, "A", "");

/* kern.hostid: legacy 32-bit host identifier, kept for compatibility. */
SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &hostid, 0, "");
1983
1984 STATIC int
1985 sysctl_hostname
1986 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1987 {
1988 int error, changed;
1989 error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
1990 if (changed) {
1991 hostnamelen = req->newlen;
1992 }
1993 return(error);
1994 }
1995
1996
/* kern.hostname: read/write system hostname. */
SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_hostname, "A", "");
2000
2001 STATIC int
2002 sysctl_procname
2003 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2004 {
2005 /* Original code allowed writing, I'm copying this, although this all makes
2006 no sense to me. Besides, this sysctl is never used. */
2007 return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
2008 }
2009
SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_procname, "A", "");

/* Global disable for speculative (read-ahead) file I/O. */
SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_reads_disabled, 0, "");

/* Cluster-read (preheat) sizing knobs. */
SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_max_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_min_bytes, 0, "");

/* Speculative prefetch limits. */
SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max_iosize, 0, "");

/* VM free-page thresholds, exported for diagnostics and tuning. */
SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_target, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_reserved, 0, "");

/* Speculative page queue parameters (backed by vm_pageout_state). */
SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_pageout_state.vm_page_speculative_percentage, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_pageout_state.vm_page_speculative_q_age_ms, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_delayed_work_limit, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_batch, 0, "");

/* kern.bootsessionuuid: UUID string identifying this boot session (read-only). */
SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &bootsessionuuid_string, sizeof(bootsessionuuid_string) , "");
2065
2066 STATIC int
2067 sysctl_boottime
2068 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2069 {
2070 struct timeval tv;
2071 boottime_timeval(&tv);
2072 struct proc *p = req->p;
2073
2074 if (proc_is64bit(p)) {
2075 struct user64_timeval t = {};
2076 t.tv_sec = tv.tv_sec;
2077 t.tv_usec = tv.tv_usec;
2078 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2079 } else {
2080 struct user32_timeval t = {};
2081 t.tv_sec = tv.tv_sec;
2082 t.tv_usec = tv.tv_usec;
2083 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2084 }
2085 }
2086
/* kern.boottime: time at which the system booted (struct timeval). */
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
    CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_boottime, "S,timeval", "");
2090
2091 STATIC int
2092 sysctl_symfile
2093 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2094 {
2095 char *str;
2096 int error = get_kernel_symfile(req->p, &str);
2097 if (error)
2098 return (error);
2099 return sysctl_io_string(req, str, 0, 0, NULL);
2100 }
2101
2102
/* kern.symfile: path of the kernel symbol file (read-only). */
SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_symfile, "A", "");
2106
#if NFSCLIENT
/*
 * kern.netboot: nonzero when the root filesystem is network-mounted
 * (value comes straight from netboot_root()).
 */
STATIC int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_netboot, "I", "");
#endif
2119
#ifdef CONFIG_IMGSRC_ACCESS
/*
 * Legacy--act as if only one layer of nesting is possible.
 */
/*
 * kern.imgsrcdev: dev_t backing the outermost image-boot source volume.
 * Root-only, read-only.  Holds vnode iocounts across the copy-out and
 * releases them in reverse acquisition order.
 */
STATIC int
sysctl_imgsrcdev
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	vfs_context_t ctx = vfs_context_current();
	vnode_t devvp;
	int result;

	/* superuser only */
	if (!vfs_context_issuser(ctx)) {
		return EPERM;
	}

	/* no image-boot source recorded */
	if (imgsrc_rootvnodes[0] == NULL) {
		return ENOENT;
	}

	/* take an iocount on the root vnode while its mount is examined */
	result = vnode_getwithref(imgsrc_rootvnodes[0]);
	if (result != 0) {
		return result;
	}

	devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
	result = vnode_getwithref(devvp);
	if (result != 0) {
		goto out;
	}

	result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);

	vnode_put(devvp);
out:
	vnode_put(imgsrc_rootvnodes[0]);
	return result;
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcdev, "I", "");
2162
/*
 * kern.imgsrcinfo: one imgsrc_info record per image-boot nesting level.
 * Iterates imgsrc_rootvnodes[], taking an iocount on each root vnode and
 * its backing device vnode while the record is filled in.
 */
STATIC int
sysctl_imgsrcinfo
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct imgsrc_info info[MAX_IMAGEBOOT_NESTING] = {};	/* 2 for now, no problem */
	uint32_t i;
	vnode_t rvp, devvp;

	if (imgsrc_rootvnodes[0] == NULLVP) {
		return ENXIO;
	}

	for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
		/*
		 * Go get the root vnode.
		 */
		rvp = imgsrc_rootvnodes[i];
		if (rvp == NULLVP) {
			break;
		}

		error = vnode_get(rvp);
		if (error != 0) {
			return error;
		}

		/*
		 * For now, no getting at a non-local volume.
		 */
		devvp = vnode_mount(rvp)->mnt_devvp;
		if (devvp == NULL) {
			vnode_put(rvp);
			return EINVAL;
		}

		error = vnode_getwithref(devvp);
		if (error != 0) {
			vnode_put(rvp);
			return error;
		}

		/*
		 * Fill in info.
		 */
		info[i].ii_dev = vnode_specrdev(devvp);
		info[i].ii_flags = 0;
		info[i].ii_height = i;
		bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));

		vnode_put(devvp);
		vnode_put(rvp);
	}

	/* only the 'i' entries actually populated are copied out */
	return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcinfo, "I", "");

#endif /* CONFIG_IMGSRC_ACCESS */
2225
2226
/* kern.timer node: timer coalescing and deadline-tracking controls. */
SYSCTL_DECL(_kern_timer);
SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");


SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mach_timer_coalescing_enabled, 0, "");

SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &timer_deadline_tracking_bin_1, "");
SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &timer_deadline_tracking_bin_2, "");

/* kern.timer.longterm node: long-term timer queue controls and counters. */
SYSCTL_DECL(_kern_timer_longterm);
SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");


/* Must match definition in osfmk/kern/timer_call.c */
enum {
	THRESHOLD, QCOUNT,
	ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
	LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, SCAN_INTERVAL, PAUSES
};
/* Accessors implemented in osfmk/kern/timer_call.c; keyed by the enum above. */
extern uint64_t timer_sysctl_get(int);
extern int timer_sysctl_set(int, uint64_t);
2254
2255 STATIC int
2256 sysctl_timer
2257 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2258 {
2259 int oid = (int)arg1;
2260 uint64_t value = timer_sysctl_get(oid);
2261 uint64_t new_value;
2262 int error;
2263 int changed;
2264
2265 error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
2266 if (changed)
2267 error = timer_sysctl_set(oid, new_value);
2268
2269 return error;
2270 }
2271
/* Tunable long-term timer queue parameters. */
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    (void *) THRESHOLD, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_limit,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    (void *) SCAN_LIMIT, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_interval,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    (void *) SCAN_INTERVAL, 0, sysctl_timer, "Q", "");

/* Read-only counters. */
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) QCOUNT, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_pauses,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) PAUSES, 0, sysctl_timer, "Q", "");

#if DEBUG
/* Extra instrumentation, DEBUG kernels only. */
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) ENQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) DEQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) ESCALATES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) SCANS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) PREEMPTS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
#endif /* DEBUG */
2315
2316 STATIC int
2317 sysctl_usrstack
2318 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2319 {
2320 return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
2321 }
2322
/* kern.usrstack: caller's user stack address, truncated to 32 bits. */
SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack, "I", "");
2326
2327 STATIC int
2328 sysctl_usrstack64
2329 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2330 {
2331 return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
2332 }
2333
/* kern.usrstack64: caller's user stack address at full width. */
SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack64, "Q", "");
2337
#if CONFIG_COREDUMP

/* kern.corefile: template for core dump file paths. */
SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    corefilename, sizeof(corefilename), "");
2343
2344 STATIC int
2345 sysctl_coredump
2346 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2347 {
2348 #ifdef SECURE_KERNEL
2349 (void)req;
2350 return (ENOTSUP);
2351 #else
2352 int new_value, changed;
2353 int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
2354 if (changed) {
2355 if ((new_value == 0) || (new_value == 1))
2356 do_coredump = new_value;
2357 else
2358 error = EINVAL;
2359 }
2360 return(error);
2361 #endif
2362 }
2363
/* kern.coredump: 1 to enable core dumps, 0 to disable. */
SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_coredump, "I", "");
2367
2368 STATIC int
2369 sysctl_suid_coredump
2370 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2371 {
2372 #ifdef SECURE_KERNEL
2373 (void)req;
2374 return (ENOTSUP);
2375 #else
2376 int new_value, changed;
2377 int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
2378 if (changed) {
2379 if ((new_value == 0) || (new_value == 1))
2380 sugid_coredump = new_value;
2381 else
2382 error = EINVAL;
2383 }
2384 return(error);
2385 #endif
2386 }
2387
/* kern.sugid_coredump: 1 to allow core dumps from sugid processes. */
SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_suid_coredump, "I", "");

#endif /* CONFIG_COREDUMP */
2393
2394 STATIC int
2395 sysctl_delayterm
2396 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2397 {
2398 struct proc *p = req->p;
2399 int new_value, changed;
2400 int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
2401 if (changed) {
2402 proc_lock(p);
2403 if (new_value)
2404 req->p->p_lflag |= P_LDELAYTERM;
2405 else
2406 req->p->p_lflag &= ~P_LDELAYTERM;
2407 proc_unlock(p);
2408 }
2409 return(error);
2410 }
2411
/* kern.delayterm: per-process delayed-termination flag. */
SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_delayterm, "I", "");
2415
2416
2417 STATIC int
2418 sysctl_rage_vnode
2419 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2420 {
2421 struct proc *p = req->p;
2422 struct uthread *ut;
2423 int new_value, old_value, changed;
2424 int error;
2425
2426 ut = get_bsdthread_info(current_thread());
2427
2428 if (ut->uu_flag & UT_RAGE_VNODES)
2429 old_value = KERN_RAGE_THREAD;
2430 else if (p->p_lflag & P_LRAGE_VNODES)
2431 old_value = KERN_RAGE_PROC;
2432 else
2433 old_value = 0;
2434
2435 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2436
2437 if (error == 0) {
2438 switch (new_value) {
2439 case KERN_RAGE_PROC:
2440 proc_lock(p);
2441 p->p_lflag |= P_LRAGE_VNODES;
2442 proc_unlock(p);
2443 break;
2444 case KERN_UNRAGE_PROC:
2445 proc_lock(p);
2446 p->p_lflag &= ~P_LRAGE_VNODES;
2447 proc_unlock(p);
2448 break;
2449
2450 case KERN_RAGE_THREAD:
2451 ut->uu_flag |= UT_RAGE_VNODES;
2452 break;
2453 case KERN_UNRAGE_THREAD:
2454 ut = get_bsdthread_info(current_thread());
2455 ut->uu_flag &= ~UT_RAGE_VNODES;
2456 break;
2457 }
2458 }
2459 return(error);
2460 }
2461
/* kern.rage_vnode: per-process/per-thread rapid vnode aging control. */
SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_rage_vnode, "I", "");
2465
/* XXX move this interface into libproc and remove this sysctl */
/*
 * kern.setthread_cpupercent: write-only knob applying a CPU usage limit
 * to the calling thread.  The new value packs the percentage in the low
 * 8 bits and the refill period (milliseconds) in the upper 24 bits.
 */
STATIC int
sysctl_setthread_cpupercent
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, old_value;
	int error = 0;
	kern_return_t kret = KERN_SUCCESS;
	uint8_t percent = 0;
	int ms_refill = 0;

	/* write-only: a plain read is a no-op */
	if (!req->newptr)
		return (0);

	old_value = 0;

	if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0)
		return (error);

	percent = new_value & 0xff;			/* low 8 bits hold the percentage */
	ms_refill = (new_value >> 8) & 0xffffff;	/* upper 24 bits hold the ms refill value */
	if (percent > 100)
		return (EINVAL);

	/*
	 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
	 */
	if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0)
		return (EIO);

	return (0);
}
2498
/* kern.setthread_cpupercent: write-only per-thread CPU limit (see handler). */
SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
    0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
2502
2503
2504 STATIC int
2505 sysctl_kern_check_openevt
2506 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2507 {
2508 struct proc *p = req->p;
2509 int new_value, old_value, changed;
2510 int error;
2511
2512 if (p->p_flag & P_CHECKOPENEVT) {
2513 old_value = KERN_OPENEVT_PROC;
2514 } else {
2515 old_value = 0;
2516 }
2517
2518 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2519
2520 if (error == 0) {
2521 switch (new_value) {
2522 case KERN_OPENEVT_PROC:
2523 OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
2524 break;
2525
2526 case KERN_UNOPENEVT_PROC:
2527 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
2528 break;
2529
2530 default:
2531 error = EINVAL;
2532 }
2533 }
2534 return(error);
2535 }
2536
/* kern.check_openevt: per-process check-open-evt flag (see handler above). */
SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2539
2540
2541
/*
 * Handler for kern.nx: toggles no-execute (NX/XD) page protection.
 * Compiled out on SECURE_KERNEL builds; on Intel the write is refused
 * unless the CPU advertises the XD feature.
 */
STATIC int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	(void)req;
	return ENOTSUP;
#else
	int new_value, changed;
	int error;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
	if (error)
		return error;

	if (changed) {
#if defined(__i386__) || defined(__x86_64__)
		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
			return ENOTSUP;
#endif
		nx_enabled = new_value;
	}
	return(error);
#endif /* SECURE_KERNEL */
}
2570
2571
2572
/* kern.nx: no-execute page protection toggle. */
SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_nx, "I", "");
2576
2577 STATIC int
2578 sysctl_loadavg
2579 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2580 {
2581 if (proc_is64bit(req->p)) {
2582 struct user64_loadavg loadinfo64 = {};
2583 fill_loadavg64(&averunnable, &loadinfo64);
2584 return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
2585 } else {
2586 struct user32_loadavg loadinfo32 = {};
2587 fill_loadavg32(&averunnable, &loadinfo32);
2588 return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
2589 }
2590 }
2591
/* vm.loadavg: 1/5/15-minute run-queue load averages. */
SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_loadavg, "S,loadavg", "");
2595
2596 /*
2597 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2598 */
2599 STATIC int
2600 sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
2601 __unused int arg2, struct sysctl_req *req)
2602 {
2603 int old_value=0, new_value=0, error=0;
2604
2605 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value ))
2606 return(error);
2607 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
2608 if (!error) {
2609 return (vm_toggle_entry_reuse(new_value, NULL));
2610 }
2611 return(error);
2612 }
2613
/* debug.toggle_address_reuse: get/set vm map entry reuse behavior (see vm_toggle_entry_reuse()). */
SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse,"I","");
2615
#ifdef CONFIG_XNUPOST

/* XNU power-on self test (POST) hooks; implemented by the xnupost harness. */
extern int xnupost_export_testdata(void *outp, uint32_t size, uint32_t *lenp);
extern uint32_t xnupost_get_estimated_testdata_size(void);

extern int xnupost_reset_all_tests(void);
2622
2623 STATIC int
2624 sysctl_handle_xnupost_get_tests SYSCTL_HANDLER_ARGS
2625 {
2626 /* fixup unused arguments warnings */
2627 __unused int _oa2 = arg2;
2628 __unused void * _oa1 = arg1;
2629 __unused struct sysctl_oid * _oidp = oidp;
2630
2631 int error = 0;
2632 user_addr_t oldp = 0;
2633 user_addr_t newp = 0;
2634 uint32_t usedbytes = 0;
2635
2636 oldp = req->oldptr;
2637 newp = req->newptr;
2638
2639 if (newp)
2640 return ENOTSUP;
2641
2642 if ((void *)oldp == NULL) {
2643 /* return estimated size for second call where info can be placed */
2644 req->oldidx = xnupost_get_estimated_testdata_size();
2645 } else {
2646 error = xnupost_export_testdata((void *)oldp, req->oldlen, &usedbytes);
2647 req->oldidx = usedbytes;
2648 }
2649
2650 return error;
2651 }
2652
/* debug.xnupost_get_tests: export recorded xnupost test data (read-only). */
SYSCTL_PROC(_debug,
    OID_AUTO,
    xnupost_get_tests,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_handle_xnupost_get_tests,
    "-",
    "read xnupost test data in kernel");
2662
2663 STATIC int
2664 sysctl_debug_xnupost_ctl SYSCTL_HANDLER_ARGS
2665 {
2666 /* fixup unused arguments warnings */
2667 __unused int _oa2 = arg2;
2668 __unused void * _oa1 = arg1;
2669 __unused struct sysctl_oid * _oidp = oidp;
2670
2671 #define ARRCOUNT 4
2672 /*
2673 * INPUT: ACTION, PARAM1, PARAM2, PARAM3
2674 * OUTPUT: RESULTCODE, ADDITIONAL DATA
2675 */
2676 int32_t outval[ARRCOUNT] = {0};
2677 int32_t input[ARRCOUNT] = {0};
2678 int32_t out_size = sizeof(outval);
2679 int32_t in_size = sizeof(input);
2680 int error = 0;
2681
2682 /* if this is NULL call to find out size, send out size info */
2683 if (!req->newptr) {
2684 goto out;
2685 }
2686
2687 /* pull in provided value from userspace */
2688 error = SYSCTL_IN(req, &input[0], in_size);
2689 if (error)
2690 return error;
2691
2692 if (input[0] == XTCTL_RESET_TESTDATA) {
2693 outval[0] = xnupost_reset_all_tests();
2694 goto out;
2695 }
2696
2697 out:
2698 error = SYSCTL_OUT(req, &outval[0], out_size);
2699 return error;
2700 }
2701
/* debug.xnupost_testctl: control channel for in-kernel test runs. */
SYSCTL_PROC(_debug,
    OID_AUTO,
    xnupost_testctl,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_debug_xnupost_ctl,
    "I",
    "xnupost control for kernel testing");

/* Implemented by the oslog test harness. */
extern void test_oslog_handleOSLogCtl(int32_t * in, int32_t * out, int32_t arraycount);
2713
2714 STATIC int
2715 sysctl_debug_test_oslog_ctl(__unused struct sysctl_oid * oidp, __unused void * arg1, __unused int arg2, struct sysctl_req * req)
2716 {
2717 #define ARRCOUNT 4
2718 int32_t outval[ARRCOUNT] = {0};
2719 int32_t input[ARRCOUNT] = {0};
2720 int32_t size_outval = sizeof(outval);
2721 int32_t size_inval = sizeof(input);
2722 int32_t error;
2723
2724 /* if this is NULL call to find out size, send out size info */
2725 if (!req->newptr) {
2726 error = SYSCTL_OUT(req, &outval[0], size_outval);
2727 return error;
2728 }
2729
2730 /* pull in provided value from userspace */
2731 error = SYSCTL_IN(req, &input[0], size_inval);
2732 if (error)
2733 return error;
2734
2735 test_oslog_handleOSLogCtl(input, outval, ARRCOUNT);
2736
2737 error = SYSCTL_OUT(req, &outval[0], size_outval);
2738
2739 return error;
2740 }
2741
/* debug.test_OSLogCtl: oslog in-kernel test control. */
SYSCTL_PROC(_debug,
    OID_AUTO,
    test_OSLogCtl,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_debug_test_oslog_ctl,
    "I",
    "testing oslog in kernel");

#include <mach/task.h>
#include <mach/semaphore.h>

extern lck_grp_t * sysctl_debug_test_stackshot_owner_grp; /* used for both mutexes and rwlocks */
extern lck_mtx_t * sysctl_debug_test_stackshot_owner_init_mtx; /* used to protect lck_*_init */

/* This is a sysctl for testing collection of owner info on a lock in kernel space. A multi-threaded
 * test from userland sets this sysctl in such a way that a thread blocks in kernel mode, and a
 * stackshot is taken to see if the owner of the lock can be identified.
 *
 * We can't return to userland with a kernel lock held, so be sure to unlock before we leave.
 * the semaphores allow us to artificially create cases where the lock is being held and the
 * thread is hanging / taking a long time to do something. */

/* State for the mutex flavor of the stackshot lock-owner test. */
volatile char sysctl_debug_test_stackshot_mtx_inited = 0;
semaphore_t sysctl_debug_test_stackshot_mutex_sem;
lck_mtx_t sysctl_debug_test_stackshot_owner_lck;

/* Actions accepted by debug.test_MutexOwnerCtl. */
#define SYSCTL_DEBUG_MTX_ACQUIRE_WAIT 1
#define SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT 2
#define SYSCTL_DEBUG_MTX_SIGNAL 3
#define SYSCTL_DEBUG_MTX_TEARDOWN 4
2774
/*
 * Handler for debug.test_MutexOwnerCtl.  Reading returns the unslid
 * address of the test mutex (for matching against stackshot output);
 * writing selects one of the SYSCTL_DEBUG_MTX_* actions above.
 */
STATIC int
sysctl_debug_test_stackshot_mutex_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	long long option = -1;
	/* if the user tries to read the sysctl, we tell them what the address of the lock is (to test against stackshot's output) */
	long long mtx_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_lck);
	int error = sysctl_io_number(req, mtx_unslid_addr, sizeof(long long), (void*)&option, NULL);

	/* lazily initialize the test mutex and semaphore exactly once */
	lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);
	if (!sysctl_debug_test_stackshot_mtx_inited) {
		lck_mtx_init(&sysctl_debug_test_stackshot_owner_lck,
		    sysctl_debug_test_stackshot_owner_grp,
		    LCK_ATTR_NULL);
		semaphore_create(kernel_task,
		    &sysctl_debug_test_stackshot_mutex_sem,
		    SYNC_POLICY_FIFO, 0);
		sysctl_debug_test_stackshot_mtx_inited = 1;
	}
	lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);

	if (!error) {
		switch(option) {
		case SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT:
			/* take and immediately drop the lock */
			lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck);
			lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck);
			break;
		case SYSCTL_DEBUG_MTX_ACQUIRE_WAIT:
			/* hold the lock until another thread issues SYSCTL_DEBUG_MTX_SIGNAL */
			lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck);
			semaphore_wait(sysctl_debug_test_stackshot_mutex_sem);
			lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck);
			break;
		case SYSCTL_DEBUG_MTX_SIGNAL:
			semaphore_signal(sysctl_debug_test_stackshot_mutex_sem);
			break;
		case SYSCTL_DEBUG_MTX_TEARDOWN:
			lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);

			lck_mtx_destroy(&sysctl_debug_test_stackshot_owner_lck,
			    sysctl_debug_test_stackshot_owner_grp);
			semaphore_destroy(kernel_task,
			    sysctl_debug_test_stackshot_mutex_sem);
			sysctl_debug_test_stackshot_mtx_inited = 0;

			lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);
			break;
		case -1: /* user just wanted to read the value, so do nothing */
			break;
		default:
			error = EINVAL;
			break;
		}
	}
	return error;
}
2829
/* we can't return to userland with a kernel rwlock held, so be sure to unlock before we leave.
 * the semaphores allow us to artificially create cases where the lock is being held and the
 * thread is hanging / taking a long time to do something. */

/* debug.test_MutexOwnerCtl: stackshot mutex-owner test control. */
SYSCTL_PROC(_debug,
    OID_AUTO,
    test_MutexOwnerCtl,
    CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    0,
    0,
    sysctl_debug_test_stackshot_mutex_owner,
    "-",
    "Testing mutex owner in kernel");

/* State for the rwlock flavor of the stackshot lock-owner test. */
volatile char sysctl_debug_test_stackshot_rwlck_inited = 0;
lck_rw_t sysctl_debug_test_stackshot_owner_rwlck;
semaphore_t sysctl_debug_test_stackshot_rwlck_sem;

/* Actions accepted by debug.test_RWLockOwnerCtl. */
#define SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT 1
#define SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT 2
#define SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT 3
#define SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT 4
#define SYSCTL_DEBUG_KRWLCK_SIGNAL 5
#define SYSCTL_DEBUG_KRWLCK_TEARDOWN 6
2854
/*
 * Handler for debug.test_RWLockOwnerCtl: rwlock flavor of the stackshot
 * lock-owner test.  Reading returns the unslid address of the test
 * rwlock; writing selects one of the SYSCTL_DEBUG_KRWLCK_* actions.
 */
STATIC int
sysctl_debug_test_stackshot_rwlck_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	long long option = -1;
	/* if the user tries to read the sysctl, we tell them what the address of the lock is
	 * (to test against stackshot's output) */
	long long rwlck_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_rwlck);
	int error = sysctl_io_number(req, rwlck_unslid_addr, sizeof(long long), (void*)&option, NULL);

	/* lazily initialize the test rwlock and semaphore exactly once */
	lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);
	if (!sysctl_debug_test_stackshot_rwlck_inited) {
		lck_rw_init(&sysctl_debug_test_stackshot_owner_rwlck,
		    sysctl_debug_test_stackshot_owner_grp,
		    LCK_ATTR_NULL);
		semaphore_create(kernel_task,
		    &sysctl_debug_test_stackshot_rwlck_sem,
		    SYNC_POLICY_FIFO,
		    0);
		sysctl_debug_test_stackshot_rwlck_inited = 1;
	}
	lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);

	if (!error) {
		switch(option) {
		case SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT:
			/* take and immediately drop the lock shared */
			lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
			lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
			break;
		case SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT:
			/* hold shared until another thread issues SYSCTL_DEBUG_KRWLCK_SIGNAL */
			lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
			semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem);
			lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
			break;
		case SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT:
			/* take and immediately drop the lock exclusive */
			lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
			lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
			break;
		case SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT:
			/* hold exclusive until signalled */
			lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
			semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem);
			lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
			break;
		case SYSCTL_DEBUG_KRWLCK_SIGNAL:
			semaphore_signal(sysctl_debug_test_stackshot_rwlck_sem);
			break;
		case SYSCTL_DEBUG_KRWLCK_TEARDOWN:
			lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);

			lck_rw_destroy(&sysctl_debug_test_stackshot_owner_rwlck,
			    sysctl_debug_test_stackshot_owner_grp);
			semaphore_destroy(kernel_task,
			    sysctl_debug_test_stackshot_rwlck_sem);
			sysctl_debug_test_stackshot_rwlck_inited = 0;

			lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);
			break;
		case -1: /* user just wanted to read the value, so do nothing */
			break;
		default:
			error = EINVAL;
			break;
		}
	}
	return error;
}
2920
2921
2922 SYSCTL_PROC(_debug,
2923 OID_AUTO,
2924 test_RWLockOwnerCtl,
2925 CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2926 0,
2927 0,
2928 sysctl_debug_test_stackshot_rwlck_owner,
2929 "-",
2930 "Testing rwlock owner in kernel");
2931 #endif /* !CONFIG_XNUPOST */
2932
2933 STATIC int
2934 sysctl_swapusage
2935 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2936 {
2937 int error;
2938 uint64_t swap_total;
2939 uint64_t swap_avail;
2940 vm_size_t swap_pagesize;
2941 boolean_t swap_encrypted;
2942 struct xsw_usage xsu = {};
2943
2944 error = macx_swapinfo(&swap_total,
2945 &swap_avail,
2946 &swap_pagesize,
2947 &swap_encrypted);
2948 if (error)
2949 return error;
2950
2951 xsu.xsu_total = swap_total;
2952 xsu.xsu_avail = swap_avail;
2953 xsu.xsu_used = swap_total - swap_avail;
2954 xsu.xsu_pagesize = swap_pagesize;
2955 xsu.xsu_encrypted = swap_encrypted;
2956 return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
2957 }
2958
2959
2960
2961 SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
2962 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2963 0, 0, sysctl_swapusage, "S,xsw_usage", "");
2964
#if CONFIG_FREEZE
extern void vm_page_reactivate_all_throttled(void);
extern void memorystatus_disable_freeze(void);

/*
 * Handler for vm.freeze_enabled.
 *
 * Reads/writes the memorystatus freezer enable flag.  A write is rejected
 * with EINVAL unless the freezer swap configuration is active.  When the
 * freezer transitions from enabled to disabled, throttled (dirty) pages are
 * moved back to the active queue and the freezer is shut down.
 */
static int
sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = memorystatus_freeze_enabled ? 1 : 0;
	boolean_t disabled;

	error = sysctl_handle_int(oidp, &val, 0, req);
	/* Read-only access (or an error) ends here; newptr is set on writes. */
	if (error || !req->newptr)
		return (error);

	if (! VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
		//assert(req->newptr);
		printf("Failed attempt to set vm.freeze_enabled sysctl\n");
		return EINVAL;
	}

	/*
	 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
	 */
	/* Capture the transition BEFORE overwriting the global below. */
	disabled = (!val && memorystatus_freeze_enabled);

	memorystatus_freeze_enabled = val ? TRUE : FALSE;

	if (disabled) {
		vm_page_reactivate_all_throttled();
		memorystatus_disable_freeze();
	}

	return (0);
}

SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
#endif /* CONFIG_FREEZE */
3003
#if DEVELOPMENT || DEBUG
extern int vm_num_swap_files_config;
extern int vm_num_swap_files;
extern lck_mtx_t vm_swap_data_lock;
#define VM_MAX_SWAP_FILE_NUM		100

/*
 * Handler for debug.num_swap_files_configured.
 *
 * Reads/writes the configured maximum number of swap files.  Writes are
 * rejected (EINVAL) when swap is disabled or when the requested count is
 * below the number of swap files already in existence; values above
 * VM_MAX_SWAP_FILE_NUM are silently clamped.  The configured value is
 * updated under vm_swap_data_lock.
 */
static int
sysctl_vm_config_num_swap_files SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error = 0, val = vm_num_swap_files_config;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr) {
		goto out;
	}

	if (!VM_CONFIG_SWAP_IS_ACTIVE && !VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
		printf("Swap is disabled\n");
		error = EINVAL;
		goto out;
	}

	lck_mtx_lock(&vm_swap_data_lock);

	if (val < vm_num_swap_files) {
		printf("Cannot configure fewer swap files than already exist.\n");
		error = EINVAL;
		lck_mtx_unlock(&vm_swap_data_lock);
		goto out;
	}

	if (val > VM_MAX_SWAP_FILE_NUM) {
		printf("Capping number of swap files to upper bound.\n");
		val = VM_MAX_SWAP_FILE_NUM;
	}

	vm_num_swap_files_config = val;
	lck_mtx_unlock(&vm_swap_data_lock);
out:
	/*
	 * BUGFIX: previously this returned a hard-coded 0, silently
	 * discarding the EINVAL set on the failure paths above, so a
	 * rejected write still looked successful to userspace.
	 */
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, num_swap_files_configured, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_config_num_swap_files, "I", "");
#endif /* DEVELOPMENT || DEBUG */
3050
/* this kernel does NOT implement shared_region_make_private_np() */
/* kern.shreg_private: NULL backing pointer makes this read as constant 0. */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		(int *)NULL, 0, "");
3055
3056 STATIC int
3057 fetch_process_cputype(
3058 proc_t cur_proc,
3059 int *name,
3060 u_int namelen,
3061 cpu_type_t *cputype)
3062 {
3063 proc_t p = PROC_NULL;
3064 int refheld = 0;
3065 cpu_type_t ret = 0;
3066 int error = 0;
3067
3068 if (namelen == 0)
3069 p = cur_proc;
3070 else if (namelen == 1) {
3071 p = proc_find(name[0]);
3072 if (p == NULL)
3073 return (EINVAL);
3074 refheld = 1;
3075 } else {
3076 error = EINVAL;
3077 goto out;
3078 }
3079
3080 ret = cpu_type() & ~CPU_ARCH_MASK;
3081 if (IS_64BIT_PROCESS(p)) {
3082 ret |= CPU_ARCH_ABI64;
3083 }
3084
3085 *cputype = ret;
3086
3087 if (refheld != 0)
3088 proc_rele(p);
3089 out:
3090 return (error);
3091 }
3092
3093 STATIC int
3094 sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
3095 struct sysctl_req *req)
3096 {
3097 int error;
3098 cpu_type_t proc_cputype = 0;
3099 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
3100 return error;
3101 int res = 1;
3102 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
3103 res = 0;
3104 return SYSCTL_OUT(req, &res, sizeof(res));
3105 }
3106 SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native ,"I","proc_native");
3107
3108 STATIC int
3109 sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
3110 struct sysctl_req *req)
3111 {
3112 int error;
3113 cpu_type_t proc_cputype = 0;
3114 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
3115 return error;
3116 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
3117 }
3118 SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
3119
3120 STATIC int
3121 sysctl_safeboot
3122 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3123 {
3124 return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
3125 }
3126
3127 SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
3128 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
3129 0, 0, sysctl_safeboot, "I", "");
3130
3131 STATIC int
3132 sysctl_singleuser
3133 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3134 {
3135 return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
3136 }
3137
3138 SYSCTL_PROC(_kern, OID_AUTO, singleuser,
3139 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
3140 0, 0, sysctl_singleuser, "I", "");
3141
3142 STATIC int sysctl_minimalboot
3143 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3144 {
3145 return sysctl_io_number(req, minimalboot, sizeof(int), NULL, NULL);
3146 }
3147
3148 SYSCTL_PROC(_kern, OID_AUTO, minimalboot,
3149 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
3150 0, 0, sysctl_minimalboot, "I", "");
3151
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t affinity_sets_enabled;
extern int affinity_sets_mapping;

/* NOTE(review): the (int *) cast assumes boolean_t is int-sized — confirm. */
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
	    CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
	    CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
3162
3163 /*
3164 * Boolean indicating if KASLR is active.
3165 */
3166 STATIC int
3167 sysctl_slide
3168 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3169 {
3170 uint32_t slide;
3171
3172 slide = vm_kernel_slide ? 1 : 0;
3173
3174 return sysctl_io_number( req, slide, sizeof(int), NULL, NULL);
3175 }
3176
3177 SYSCTL_PROC(_kern, OID_AUTO, slide,
3178 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
3179 0, 0, sysctl_slide, "I", "");
3180
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory.  This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
 * kmem_init().
 *
 * All values are in bytes.
 */

vm_map_size_t	vm_global_no_user_wire_amount;
vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

/*
 * There needs to be a more automatic/elegant way to do this
 */
/* NOTE(review): presumably vm_map_size_t is 32-bit on __ARM__ (hence
 * SYSCTL_INT) and 64-bit elsewhere (hence SYSCTL_QUAD) — confirm. */
#if defined(__ARM__)
SYSCTL_INT(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, 0, "");
SYSCTL_INT(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, 0, "");
SYSCTL_INT(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, 0, "");
#else
SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
#endif

/* Read-only counters from vm_map copy-overwrite fast-path accounting. */
extern int vm_map_copy_overwrite_aligned_src_not_internal;
extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
extern int vm_map_copy_overwrite_aligned_src_large;
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");


extern uint32_t	vm_page_external_count;

SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");

SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min, 0, "");

#if DEVELOPMENT || DEBUG
/* Writable divisors for tuning the above minimums on dev/debug kernels. */
SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min_divisor, 0, "");
#endif
3230
3231 extern int vm_compressor_mode;
3232 extern int vm_compressor_is_active;
3233 extern int vm_compressor_available;
3234 extern uint32_t vm_ripe_target_age;
3235 extern uint32_t swapout_target_age;
3236 extern int64_t compressor_bytes_used;
3237 extern int64_t c_segment_input_bytes;
3238 extern int64_t c_segment_compressed_bytes;
3239 extern uint32_t compressor_eval_period_in_msecs;
3240 extern uint32_t compressor_sample_min_in_msecs;
3241 extern uint32_t compressor_sample_max_in_msecs;
3242 extern uint32_t compressor_thrashing_threshold_per_10msecs;
3243 extern uint32_t compressor_thrashing_min_per_10msecs;
3244 extern uint32_t vm_compressor_time_thread;
3245
3246 #if DEVELOPMENT || DEBUG
3247 extern uint32_t vm_compressor_minorcompact_threshold_divisor;
3248 extern uint32_t vm_compressor_majorcompact_threshold_divisor;
3249 extern uint32_t vm_compressor_unthrottle_threshold_divisor;
3250 extern uint32_t vm_compressor_catchup_threshold_divisor;
3251
3252 extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden;
3253 extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden;
3254 extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden;
3255 extern uint32_t vm_compressor_catchup_threshold_divisor_overridden;
3256
3257 extern vmct_stats_t vmct_stats;
3258
3259
3260 STATIC int
3261 sysctl_minorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3262 {
3263 int new_value, changed;
3264 int error = sysctl_io_number(req, vm_compressor_minorcompact_threshold_divisor, sizeof(int), &new_value, &changed);
3265
3266 if (changed) {
3267 vm_compressor_minorcompact_threshold_divisor = new_value;
3268 vm_compressor_minorcompact_threshold_divisor_overridden = 1;
3269 }
3270 return(error);
3271 }
3272
3273 SYSCTL_PROC(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor,
3274 CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
3275 0, 0, sysctl_minorcompact_threshold_divisor, "I", "");
3276
3277
3278 STATIC int
3279 sysctl_majorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3280 {
3281 int new_value, changed;
3282 int error = sysctl_io_number(req, vm_compressor_majorcompact_threshold_divisor, sizeof(int), &new_value, &changed);
3283
3284 if (changed) {
3285 vm_compressor_majorcompact_threshold_divisor = new_value;
3286 vm_compressor_majorcompact_threshold_divisor_overridden = 1;
3287 }
3288 return(error);
3289 }
3290
3291 SYSCTL_PROC(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor,
3292 CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
3293 0, 0, sysctl_majorcompact_threshold_divisor, "I", "");
3294
3295
3296 STATIC int
3297 sysctl_unthrottle_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3298 {
3299 int new_value, changed;
3300 int error = sysctl_io_number(req, vm_compressor_unthrottle_threshold_divisor, sizeof(int), &new_value, &changed);
3301
3302 if (changed) {
3303 vm_compressor_unthrottle_threshold_divisor = new_value;
3304 vm_compressor_unthrottle_threshold_divisor_overridden = 1;
3305 }
3306 return(error);
3307 }
3308
3309 SYSCTL_PROC(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor,
3310 CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
3311 0, 0, sysctl_unthrottle_threshold_divisor, "I", "");
3312
3313
3314 STATIC int
3315 sysctl_catchup_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3316 {
3317 int new_value, changed;
3318 int error = sysctl_io_number(req, vm_compressor_catchup_threshold_divisor, sizeof(int), &new_value, &changed);
3319
3320 if (changed) {
3321 vm_compressor_catchup_threshold_divisor = new_value;
3322 vm_compressor_catchup_threshold_divisor_overridden = 1;
3323 }
3324 return(error);
3325 }
3326
3327 SYSCTL_PROC(_vm, OID_AUTO, compressor_catchup_threshold_divisor,
3328 CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
3329 0, 0, sysctl_catchup_threshold_divisor, "I", "");
3330 #endif
3331
3332
/* VM compressor throughput counters. */
SYSCTL_QUAD(_vm, OID_AUTO, compressor_input_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_input_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");

/* Compressor configuration/state (read-only). */
SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_available, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_available, 0, "");

SYSCTL_INT(_vm, OID_AUTO, vm_ripe_target_age_in_secs, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ripe_target_age, 0, "");

/* Writable thrashing-detection tunables. */
SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");

SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_timing_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_time_thread, 0, "");

#if DEVELOPMENT || DEBUG
/* Per-compressor-thread statistics (two threads, index 0 and 1). */
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[1], "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_threads_total, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_cthreads_total, "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[1], "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[1], "");

SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[0], 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[1], 0, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[0], 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[1], 0, "");

#endif

/* Per-codec compression statistics (lz4 / WKdm variants / uncompressed). */
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_delta, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_negative_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_negative_delta, "");

SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressed_bytes, "");

SYSCTL_QUAD(_vm, OID_AUTO, uc_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.uc_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions, "");

/* *_catime/*_datime expose *_cabstime/*_dabstime counters — presumably
 * accumulated mach absolute time; confirm against vm_compressor sources. */
SYSCTL_QUAD(_vm, OID_AUTO, wk_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_cabstime, "");

SYSCTL_QUAD(_vm, OID_AUTO, wkh_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_cabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wkh_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_cabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions_exclusive, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_mzv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_mzv_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_exclusive, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_total, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_total, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_sv_compressions, "");


SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_dabstime, "");

SYSCTL_QUAD(_vm, OID_AUTO, wkh_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_dabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wkh_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_dabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_decompressions, "");

/* Writable codec-selection heuristics (vmctune). */
SYSCTL_INT(_vm, OID_AUTO, lz4_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, wkdm_reeval_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.wkdm_reeval_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_skips, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_skips, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_run_length, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_run_length, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_preselects, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_preselects, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_run_preselection_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_preselection_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_run_continue_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_continue_bytes, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_profitable_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_profitable_bytes, 0, "");
#if DEVELOPMENT || DEBUG
extern int vm_compressor_current_codec;
extern int vm_compressor_test_seg_wp;
extern boolean_t vm_compressor_force_sw_wkdm;
SYSCTL_INT(_vm, OID_AUTO, compressor_codec, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_current_codec, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_test_wp, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_test_seg_wp, 0, "");

SYSCTL_INT(_vm, OID_AUTO, wksw_force, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_force_sw_wkdm, 0, "");
extern int precompy, wkswhw;

SYSCTL_INT(_vm, OID_AUTO, precompy, CTLFLAG_RW | CTLFLAG_LOCKED, &precompy, 0, "");
SYSCTL_INT(_vm, OID_AUTO, wkswhw, CTLFLAG_RW | CTLFLAG_LOCKED, &wkswhw, 0, "");
extern unsigned int vm_ktrace_enabled;
SYSCTL_INT(_vm, OID_AUTO, vm_ktrace, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ktrace_enabled, 0, "");
#endif

#if CONFIG_PHANTOM_CACHE
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
extern uint32_t phantom_cache_thrashing_threshold_ssd;


SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
#endif

#if CONFIG_BACKGROUND_QUEUE

extern uint32_t	vm_page_background_count;
extern uint32_t	vm_page_background_target;
extern uint32_t	vm_page_background_internal_count;
extern uint32_t	vm_page_background_external_count;
extern uint32_t	vm_page_background_mode;
extern uint32_t	vm_page_background_exclude_external;
extern uint64_t	vm_page_background_promoted_count;
extern uint64_t vm_pageout_rejected_bq_internal;
extern uint64_t vm_pageout_rejected_bq_external;

/* Background page queue controls (RW) and counters (RD). */
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");

SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_considered_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_considered_bq_external, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");

#endif /* CONFIG_BACKGROUND_QUEUE */
3481
3482 extern void vm_update_darkwake_mode(boolean_t);
3483 extern boolean_t vm_darkwake_mode;
3484
3485 STATIC int
3486 sysctl_toggle_darkwake_mode(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3487 {
3488 int new_value, changed;
3489 int error = sysctl_io_number(req, vm_darkwake_mode, sizeof(int), &new_value, &changed);
3490
3491 if ( !error && changed) {
3492
3493 if (new_value != 0 && new_value != 1) {
3494 printf("Error: Invalid value passed to darkwake sysctl. Acceptable: 0 or 1.\n");
3495 error = EINVAL;
3496 } else {
3497 vm_update_darkwake_mode((boolean_t) new_value);
3498 }
3499 }
3500
3501 return(error);
3502 }
3503
3504 SYSCTL_PROC(_vm, OID_AUTO, darkwake_mode,
3505 CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
3506 0, 0, sysctl_toggle_darkwake_mode, "I", "");
3507
#if (DEVELOPMENT || DEBUG)

/* Pageout throttling counters (dev/debug only). */
SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
	    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	    &vm_page_creation_throttled_hard, 0, "");

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
	    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	    &vm_page_creation_throttled_soft, 0, "");

extern uint32_t vm_pageout_memorystatus_fb_factor_nr;
extern uint32_t vm_pageout_memorystatus_fb_factor_dr;
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_nr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_nr, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_dr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_dr, 0, "");


SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_debug.vm_grab_anon_overrides, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_debug.vm_grab_anon_nops, 0, "");

/* log message counters for persistence mode */
extern uint32_t oslog_p_total_msgcount;
extern uint32_t oslog_p_metadata_saved_msgcount;
extern uint32_t oslog_p_metadata_dropped_msgcount;
extern uint32_t oslog_p_error_count;
extern uint32_t oslog_p_saved_msgcount;
extern uint32_t oslog_p_dropped_msgcount;
extern uint32_t oslog_p_boot_dropped_msgcount;

/* log message counters for streaming mode */
extern uint32_t oslog_s_total_msgcount;
extern uint32_t oslog_s_metadata_msgcount;
extern uint32_t oslog_s_error_count;
extern uint32_t oslog_s_streamed_msgcount;
extern uint32_t oslog_s_dropped_msgcount;

SYSCTL_UINT(_debug, OID_AUTO, oslog_p_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_boot_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_boot_dropped_msgcount, 0, "");

SYSCTL_UINT(_debug, OID_AUTO, oslog_s_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_metadata_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_metadata_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_streamed_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_streamed_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_dropped_msgcount, 0, "");


#endif /* DEVELOPMENT || DEBUG */
3559
/*
 * Enable tracing of voucher contents
 */
extern uint32_t ipc_voucher_trace_contents;

/* kern.ipc_voucher_trace_contents: writable toggle for voucher-content tracing. */
SYSCTL_INT (_kern, OID_AUTO, ipc_voucher_trace_contents,
	CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");

/*
 * Kernel stack size and depth
 */
/* NOTE(review): the (int *) casts assume the underlying vm_offset_t values fit
 * in an int for display purposes — pre-existing convention, confirm on LP64. */
SYSCTL_INT (_kern, OID_AUTO, stack_size,
	CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
	CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");

/* kern.kern_feature_overrides: read-only mask of boot-time feature overrides. */
extern unsigned int kern_feature_overrides;
SYSCTL_INT (_kern, OID_AUTO, kern_feature_overrides,
	CTLFLAG_RD | CTLFLAG_LOCKED, &kern_feature_overrides, 0, "Kernel feature override mask");

/*
 * enable back trace for port allocations
 */
extern int ipc_portbt;

SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&ipc_portbt, 0, "");

/*
 * Scheduler sysctls
 */

/* kern.sched: name of the compiled-in timeshare scheduler (read-only string). */
SYSCTL_STRING(_kern, OID_AUTO, sched,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	sched_string, sizeof(sched_string),
	"Timeshare scheduler implementation");
3597
3598 #if CONFIG_QUIESCE_COUNTER
3599 static int
3600 sysctl_cpu_quiescent_counter_interval SYSCTL_HANDLER_ARGS
3601 {
3602 #pragma unused(arg1, arg2)
3603
3604 int error = sysctl_handle_int(oidp, &cpu_checkin_min_interval_us, 0, req);
3605 if (error || !req->newptr)
3606 return error;
3607
3608 cpu_quiescent_counter_set_min_interval_us(cpu_checkin_min_interval_us);
3609
3610 return 0;
3611 }
3612
/* kern.cpu_checkin_interval: tunable quiescent-CPU checkin period (µs). */
SYSCTL_PROC(_kern, OID_AUTO, cpu_checkin_interval,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0,
	sysctl_cpu_quiescent_counter_interval, "I",
	"Quiescent CPU checkin interval (microseconds)");
3618 #endif /* CONFIG_QUIESCE_COUNTER */
3619
3620
3621 /*
3622 * Only support runtime modification on embedded platforms
3623 * with development config enabled
3624 */
#if CONFIG_EMBEDDED
#if !SECURE_KERNEL
/* Runtime toggle for precise user/kernel time accounting; only exposed on
 * embedded, non-secure-kernel configurations (see comment above). */
extern int precise_user_kernel_time;
SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time,
	CTLFLAG_RW | CTLFLAG_LOCKED,
	&precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time");
#endif
#endif
3633
3634
3635 /* Parameters related to timer coalescing tuning, to be replaced
3636 * with a dedicated systemcall in the future.
3637 */
3638 /* Enable processing pending timers in the context of any other interrupt
3639 * Coalescing tuning parameters for various thread/task attributes */
3640 STATIC int
3641 sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
3642 {
3643 #pragma unused(oidp)
3644 int size = arg2; /* subcommand*/
3645 int error;
3646 int changed = 0;
3647 uint64_t old_value_ns;
3648 uint64_t new_value_ns;
3649 uint64_t value_abstime;
3650 if (size == sizeof(uint32_t))
3651 value_abstime = *((uint32_t *)arg1);
3652 else if (size == sizeof(uint64_t))
3653 value_abstime = *((uint64_t *)arg1);
3654 else return ENOTSUP;
3655
3656 absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
3657 error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
3658 if ((error) || (!changed))
3659 return error;
3660
3661 nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
3662 if (size == sizeof(uint32_t))
3663 *((uint32_t *)arg1) = (uint32_t)value_abstime;
3664 else
3665 *((uint64_t *)arg1) = value_abstime;
3666 return error;
3667 }
3668
/*
 * Timer-coalescing tunables per thread/task class.  The *_scale nodes are
 * plain ints (shift amounts) read/written directly; the *_ns_max nodes go
 * through sysctl_timer_user_us_kernel_abstime, which converts between the
 * nanosecond value seen by userspace and the abstime stored in
 * tcoal_prio_params.
 */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_resort_threshold_abstime,
	sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");
SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_bg_abstime_max,
	sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

/* Kernel-thread coalescing window. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_kt_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_kt_abstime_max,
	sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

/* Fixed-priority thread coalescing window. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_fp_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_fp_abstime_max,
	sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

/* Timeshare thread coalescing window. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_ts_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_ts_abstime_max,
	sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

/* Latency-QoS tiers 0-5: one scale + one ns_max pair per tier. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[0], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[0],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[1], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[1],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[2], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[2],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[3], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[3],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[4], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[4],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[5], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[5],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");
3783
3784 /* Communicate the "user idle level" heuristic to the timer layer, and
3785 * potentially other layers in the future.
3786 */
3787
3788 static int
3789 timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
3790 int new_value = 0, old_value = 0, changed = 0, error;
3791
3792 old_value = timer_get_user_idle_level();
3793
3794 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
3795
3796 if (error == 0 && changed) {
3797 if (timer_set_user_idle_level(new_value) != KERN_SUCCESS)
3798 error = ERANGE;
3799 }
3800
3801 return error;
3802 }
3803
/* machdep.user_idle_level: user-idle heuristic, handled above. */
SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0,
	timer_user_idle_level, "I", "User idle level heuristic, 0-128");

#if HYPERVISOR
/* kern.hv_support: read-only flag for hypervisor framework availability. */
SYSCTL_INT(_kern, OID_AUTO, hv_support,
	CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
	&hv_support_available, 0, "");
#endif
3814
3815 #if CONFIG_EMBEDDED
/*
 * kern.darkboot handler.  Reads return the current darkboot flag; writes
 * require the PRIV_DARKBOOT entitlement and accept one of the
 * MEMORY_MAINTENANCE_DARK_BOOT_* request codes, which may also update the
 * matching NVRAM property so the setting persists across boots.
 */
STATIC int
sysctl_darkboot SYSCTL_HANDLER_ARGS
{
	int err = 0, value = 0;
#pragma unused(oidp, arg1, arg2, err, value, req)

	/*
	 * Handle the sysctl request.
	 *
	 * If this is a read, the function will set the value to the current darkboot value. Otherwise,
	 * we'll get the request identifier into "value" and then we can honor it.
	 */
	if ((err = sysctl_io_number(req, darkboot, sizeof(int), &value, NULL)) != 0) {
		goto exit;
	}

	/* writing requested, let's process the request */
	if (req->newptr) {
		/* writing is protected by an entitlement */
		if (priv_check_cred(kauth_cred_get(), PRIV_DARKBOOT, 0) != 0) {
			err = EPERM;
			goto exit;
		}

		switch (value) {
		case MEMORY_MAINTENANCE_DARK_BOOT_UNSET:
			/*
			 * If the darkboot sysctl is unset, the NVRAM variable
			 * must be unset too. If that's not the case, it means
			 * someone is doing something crazy and not supported.
			 */
			if (darkboot != 0) {
				/* non-zero return indicates the property was removed */
				int ret = PERemoveNVRAMProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME);
				if (ret) {
					darkboot = 0;
				} else {
					err = EINVAL;
				}
			}
			break;
		case MEMORY_MAINTENANCE_DARK_BOOT_SET:
			/* in-memory only: do not touch NVRAM */
			darkboot = 1;
			break;
		case MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT: {
			/*
			 * Set the NVRAM and update 'darkboot' in case
			 * of success. Otherwise, do not update
			 * 'darkboot' and report the failure.
			 */
			if (PEWriteNVRAMBooleanProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME, TRUE)) {
				darkboot = 1;
			} else {
				err = EINVAL;
			}

			break;
		}
		default:
			/* unknown request code */
			err = EINVAL;
		}
	}

exit:
	return err;
}
3881
/* kern.darkboot: CTLFLAG_ANYBODY allows reads; writes are gated by
 * PRIV_DARKBOOT inside the handler. */
SYSCTL_PROC(_kern, OID_AUTO, darkboot,
	CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
	0, 0, sysctl_darkboot, "I", "");
3885 #endif
3886
3887 #if DEVELOPMENT || DEBUG
#include <sys/sysent.h>
/* This should result in a fatal exception, verifying that "sysent" is
 * write-protected.
 */
static int
kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
	uint64_t new_value = 0, old_value = 0;
	int changed = 0, error;

	error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
	if ((error == 0) && changed) {
		/* Deliberate store to the (supposedly read-only) syscall table;
		 * reaching the printf below means the protection is broken. */
		volatile uint32_t *wraddr = (uint32_t *) &sysent[0];
		*wraddr = 0;
		printf("sysent[0] write succeeded\n");
	}
	return error;
}

/* kern.sysent_const_check: write any value to trigger the test above. */
SYSCTL_PROC(_kern, OID_AUTO, sysent_const_check,
	CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0,
	kern_sysent_write, "I", "Attempt sysent[0] write");
3910
3911 #endif
3912
/* kern.development: constant 1 on DEVELOPMENT/DEBUG kernels, 0 otherwise. */
#if DEVELOPMENT || DEBUG
SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 1, "");
#else
SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 0, "");
#endif
3918
3919
3920 #if DEVELOPMENT || DEBUG
3921
3922 static int
3923 sysctl_panic_test SYSCTL_HANDLER_ARGS
3924 {
3925 #pragma unused(arg1, arg2)
3926 int rval = 0;
3927 char str[32] = "entry prelog postlog postcore";
3928
3929 rval = sysctl_handle_string(oidp, str, sizeof(str), req);
3930
3931 if (rval == 0 && req->newptr) {
3932 if (strncmp("entry", str, strlen("entry")) == 0) {
3933 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_ENTRY, "test recursive panic at entry");
3934 } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
3935 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_PRELOG, "test recursive panic prior to writing a paniclog");
3936 } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
3937 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTLOG, "test recursive panic subsequent to paniclog");
3938 } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
3939 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTCORE, "test recursive panic subsequent to on-device core");
3940 }
3941 }
3942
3943 return rval;
3944 }
3945
3946 static int
3947 sysctl_debugger_test SYSCTL_HANDLER_ARGS
3948 {
3949 #pragma unused(arg1, arg2)
3950 int rval = 0;
3951 char str[32] = "entry prelog postlog postcore";
3952
3953 rval = sysctl_handle_string(oidp, str, sizeof(str), req);
3954
3955 if (rval == 0 && req->newptr) {
3956 if (strncmp("entry", str, strlen("entry")) == 0) {
3957 DebuggerWithContext(0, NULL, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY);
3958 } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
3959 DebuggerWithContext(0, NULL, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG);
3960 } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
3961 DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG);
3962 } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
3963 DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE);
3964 }
3965 }
3966
3967 return rval;
3968 }
3969
/* Spinlock used only by the debug.spinlock_panic_test machinery below. */
decl_lck_spin_data(, spinlock_panic_test_lock)

/* Helper thread body: grabs the test spinlock and holds it forever so the
 * sysctl thread's acquisition attempt eventually trips the spinlock-timeout
 * panic. */
__attribute__((noreturn))
static void
spinlock_panic_test_acquire_spinlock(void * arg __unused, wait_result_t wres __unused)
{
	lck_spin_lock(&spinlock_panic_test_lock);
	while (1) { ; }
}
3979
/*
 * debug.spinlock_panic_test handler: spawns a thread that holds the test
 * spinlock forever, then tries to take the same lock on this thread, which
 * should eventually panic (spinlock timeout).  Never returns on success.
 * The lock group/attr allocations are intentionally never freed — the
 * machine is expected to panic.
 */
static int
sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	if (req->newlen == 0)
		return EINVAL;

	thread_t panic_spinlock_thread;
	/* Initialize panic spinlock */
	lck_grp_t * panic_spinlock_grp;
	lck_grp_attr_t * panic_spinlock_grp_attr;
	lck_attr_t * panic_spinlock_attr;

	panic_spinlock_grp_attr = lck_grp_attr_alloc_init();
	panic_spinlock_grp = lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr);
	panic_spinlock_attr = lck_attr_alloc_init();

	lck_spin_init(&spinlock_panic_test_lock, panic_spinlock_grp, panic_spinlock_attr);


	/* Create thread to acquire spinlock */
	if (kernel_thread_start(spinlock_panic_test_acquire_spinlock, NULL, &panic_spinlock_thread) != KERN_SUCCESS) {
		return EBUSY;
	}

	/* Try to acquire spinlock -- should panic eventually */
	lck_spin_lock(&spinlock_panic_test_lock);
	while(1) { ; }
}
4009
/* Worker body for the simultaneous-panic test: spins until the shared flag
 * (owned by the sysctl thread's stack, which never unwinds) flips, then
 * panics — so all workers panic at roughly the same time on their CPUs. */
__attribute__((noreturn))
static void
simultaneous_panic_worker
(void * arg, wait_result_t wres __unused)
{
	atomic_int *start_panic = (atomic_int *)arg;

	while (!atomic_load(start_panic)) { ; }
	panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number());
	__builtin_unreachable();
}
4021
/*
 * debug.simultaneous_panic_test handler: starts 2x processor_count worker
 * threads that all panic at once when the start flag is set.  The flag lives
 * on this stack, which is safe only because this function never returns.
 */
static int
sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	if (req->newlen == 0)
		return EINVAL;

	int i = 0, threads_to_create = 2 * processor_count;
	atomic_int start_panic = 0;
	unsigned int threads_created = 0;
	thread_t new_panic_thread;

	for (i = threads_to_create; i > 0; i--) {
		if (kernel_thread_start(simultaneous_panic_worker, (void *) &start_panic, &new_panic_thread) == KERN_SUCCESS) {
			threads_created++;
		}
	}

	/* FAIL if we couldn't create at least processor_count threads */
	if (threads_created < processor_count) {
		panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %d (of %d)",
				threads_created, threads_to_create);
	}

	/* Release the workers, then spin: one of them panics the machine. */
	atomic_exchange(&start_panic, 1);
	while (1) { ; }
}
4049
/* Registration of the panic/debugger test nodes defined above. */
SYSCTL_PROC(_debug, OID_AUTO, panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_panic_test, "A", "panic test");
SYSCTL_PROC(_debug, OID_AUTO, debugger_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_debugger_test, "A", "debugger test");
SYSCTL_PROC(_debug, OID_AUTO, spinlock_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_spinlock_panic_test, "A", "spinlock panic test");
SYSCTL_PROC(_debug, OID_AUTO, simultaneous_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_simultaneous_panic_test, "A", "simultaneous panic test");

extern int exc_resource_threads_enabled;

/* kern.exc_resource_threads_enabled: read-only EXC_RESOURCE thread-limit flag. */
SYSCTL_INT(_kern, OID_AUTO, exc_resource_threads_enabled, CTLFLAG_RD | CTLFLAG_LOCKED, &exc_resource_threads_enabled, 0, "exc_resource thread limit enabled");
4058
4059
4060 #endif /* DEVELOPMENT || DEBUG */
4061
4062 const uint32_t thread_groups_supported = 0;
4063
4064 STATIC int
4065 sysctl_thread_groups_supported (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4066 {
4067 int value = thread_groups_supported;
4068 return sysctl_io_number(req, value, sizeof(value), NULL, NULL);
4069 }
4070
4071 SYSCTL_PROC(_kern, OID_AUTO, thread_groups_supported, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_KERN,
4072 0, 0, &sysctl_thread_groups_supported, "I", "thread groups supported");
4073
4074 static int
4075 sysctl_grade_cputype SYSCTL_HANDLER_ARGS
4076 {
4077 #pragma unused(arg1, arg2, oidp)
4078 int error = 0;
4079 int type_tuple[2] = {};
4080 int return_value = 0;
4081
4082 error = SYSCTL_IN(req, &type_tuple, sizeof(type_tuple));
4083
4084 if (error) {
4085 return error;
4086 }
4087
4088 return_value = grade_binary(type_tuple[0], type_tuple[1]);
4089
4090 error = SYSCTL_OUT(req, &return_value, sizeof(return_value));
4091
4092 if (error) {
4093 return error;
4094 }
4095
4096 return error;
4097 }
4098
/* kern.grade_cputype: opaque in/out node; see sysctl_grade_cputype. */
SYSCTL_PROC(_kern, OID_AUTO, grade_cputype,
	CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED|CTLTYPE_OPAQUE,
	0, 0, &sysctl_grade_cputype, "S",
	"grade value of cpu_type_t+cpu_sub_type_t");
4103
4104
4105 #if DEVELOPMENT || DEBUG
4106
4107 static atomic_int wedge_thread_should_wake = 0;
4108
4109 static int
4110 unwedge_thread SYSCTL_HANDLER_ARGS
4111 {
4112 #pragma unused(arg1, arg2)
4113 int error, val = 0;
4114 error = sysctl_handle_int(oidp, &val, 0, req);
4115 if (error || val == 0) {
4116 return error;
4117 }
4118
4119 atomic_store(&wedge_thread_should_wake, 1);
4120 return 0;
4121 }
4122
SYSCTL_PROC(_kern, OID_AUTO, unwedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, unwedge_thread, "I", "unwedge the thread wedged by kern.wedge_thread");

/* Read-only views of the phys_carveout_mb boot-arg carveout region. */
extern uintptr_t phys_carveout_pa;
SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_pa, CTLFLAG_RD | CTLFLAG_LOCKED,
	&phys_carveout_pa,
	"base physical address of the phys_carveout_mb boot-arg region");
extern size_t phys_carveout_size;
SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_size, CTLFLAG_RD | CTLFLAG_LOCKED,
	&phys_carveout_size,
	"size in bytes of the phys_carveout_mb boot-arg region");
4133
4134 static int
4135 wedge_thread SYSCTL_HANDLER_ARGS
4136 {
4137 #pragma unused(arg1, arg2)
4138
4139 int error, val = 0;
4140 error = sysctl_handle_int(oidp, &val, 0, req);
4141 if (error || val == 0) {
4142 return error;
4143 }
4144
4145 uint64_t interval = 1;
4146 nanoseconds_to_absolutetime(1000 * 1000 * 50, &interval);
4147
4148 atomic_store(&wedge_thread_should_wake, 0);
4149 while (!atomic_load(&wedge_thread_should_wake)) {
4150 tsleep1(NULL, 0, "wedge_thread", mach_absolute_time()+interval, NULL);
4151 }
4152
4153 return 0;
4154 }
4155
SYSCTL_PROC(_kern, OID_AUTO, wedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, wedge_thread, "I", "wedge this thread so it cannot be cleaned up");

/* Forward declarations for the turnstile primitive test handlers and the
 * kernel-side test entry points they call. */
static int
sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS;
static int
sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS;
int
tstile_test_prim_lock(boolean_t use_hashtable);
int
tstile_test_prim_unlock(boolean_t use_hashtable);

/* Written value selects the turnstile variant under test. */
#define SYSCTL_TURNSTILE_TEST_DEFAULT			1
#define SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE		2
4169
4170 static int
4171 sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS
4172 {
4173 #pragma unused(arg1, arg2)
4174 int error, val = 0;
4175 error = sysctl_handle_int(oidp, &val, 0, req);
4176 if (error || val == 0) {
4177 return error;
4178 }
4179 boolean_t use_hashtable = (val == SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE) ? true : false;
4180 return tstile_test_prim_lock(use_hashtable);
4181 }
4182
4183 static int
4184 sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS
4185 {
4186 #pragma unused(arg1, arg2)
4187 int error, val = 0;
4188 error = sysctl_handle_int(oidp, &val, 0, req);
4189 if (error || val == 0) {
4190 return error;
4191 }
4192 boolean_t use_hashtable = (val == SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE) ? true : false;
4193 return tstile_test_prim_unlock(use_hashtable);
4194 }
4195
SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_lock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
	0, 0, sysctl_turnstile_test_prim_lock, "I", "turnstiles test lock");

SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_unlock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
	0, 0, sysctl_turnstile_test_prim_unlock, "I", "turnstiles test unlock");

/* Turnstile statistics: the heavy lifting lives in the turnstile subsystem;
 * the handlers below just forward the raw request. */
int
turnstile_get_boost_stats_sysctl(void *req);
int
turnstile_get_unboost_stats_sysctl(void *req);
static int
sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS;
static int
sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS;
extern uint64_t thread_block_on_turnstile_count;
extern uint64_t thread_block_on_regular_waitq_count;

/* Thin forwarder: hand the whole sysctl request to the turnstile code. */
static int
sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
	return turnstile_get_boost_stats_sysctl(req);
}

/* Thin forwarder for the unboost statistics. */
static int
sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
	return turnstile_get_unboost_stats_sysctl(req);
}

SYSCTL_PROC(_kern, OID_AUTO, turnstile_boost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
	0, 0, sysctl_turnstile_boost_stats, "S", "turnstiles boost stats");
SYSCTL_PROC(_kern, OID_AUTO, turnstile_unboost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
	0, 0, sysctl_turnstile_unboost_stats, "S", "turnstiles unboost stats");
SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_turnstile,
	CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&thread_block_on_turnstile_count, "thread blocked on turnstile count");
SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_reg_waitq,
	CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&thread_block_on_regular_waitq_count, "thread blocked on regular waitq count");
4237
4238 static int
4239 sysctl_lck_mtx_test_lock SYSCTL_HANDLER_ARGS
4240 {
4241 #pragma unused(arg1, arg2)
4242 int error, val = 0;
4243 error = sysctl_handle_int(oidp, &val, 0, req);
4244 if (error || val == 0) {
4245 return error;
4246 }
4247
4248 if (val == 1) {
4249 lck_mtx_test_init();
4250 lck_mtx_test_lock();
4251 }
4252
4253 return 0;
4254 }
4255
4256 static int
4257 sysctl_lck_mtx_test_unlock SYSCTL_HANDLER_ARGS
4258 {
4259 #pragma unused(arg1, arg2)
4260 int error, val = 0;
4261 error = sysctl_handle_int(oidp, &val, 0, req);
4262 if (error || val == 0) {
4263 return error;
4264 }
4265
4266 if (val == 1) {
4267 lck_mtx_test_init();
4268 lck_mtx_test_unlock();
4269 }
4270
4271 return 0;
4272 }
4273
4274 static int
4275 sysctl_erase_all_test_mtx_stats SYSCTL_HANDLER_ARGS
4276 {
4277 #pragma unused(arg1, arg2)
4278 int error, val = 0;
4279 error = sysctl_handle_int(oidp, &val, 0, req);
4280 if (error || val == 0) {
4281 return error;
4282 }
4283
4284 if (val == 1) {
4285 lck_mtx_test_init();
4286 erase_all_test_mtx_stats();
4287 }
4288
4289 return 0;
4290 }
4291
4292 static int
4293 sysctl_get_test_mtx_stats SYSCTL_HANDLER_ARGS
4294 {
4295 #pragma unused(oidp, arg1, arg2)
4296 char* buffer;
4297 int size, buffer_size, error;
4298
4299 buffer_size = 1000;
4300 buffer = kalloc(buffer_size);
4301 if (!buffer)
4302 panic("Impossible to allocate memory for %s\n", __func__);
4303
4304 lck_mtx_test_init();
4305
4306 size = get_test_mtx_stats_string(buffer, buffer_size);
4307
4308 error = sysctl_io_string(req, buffer, size, 0, NULL);
4309
4310 kfree(buffer, buffer_size);
4311
4312 return error;
4313 }
4314
4315 static int
4316 sysctl_test_mtx_uncontended SYSCTL_HANDLER_ARGS
4317 {
4318 #pragma unused(oidp, arg1, arg2)
4319 char* buffer;
4320 int buffer_size, offset, error, iter;
4321 char input_val[40];
4322
4323 if (!req->newptr) {
4324 return 0;
4325 }
4326
4327 if (!req->oldptr) {
4328 return EINVAL;
4329 }
4330
4331 if (req->newlen >= sizeof(input_val)) {
4332 return EINVAL;
4333 }
4334
4335 error = SYSCTL_IN(req, input_val, req->newlen);
4336 if (error) {
4337 return error;
4338 }
4339 input_val[req->newlen] = '\0';
4340
4341 sscanf(input_val, "%d", &iter);
4342
4343 if (iter <= 0) {
4344 printf("%s requested %d iterations, not starting the test\n", __func__, iter);
4345 return EINVAL;
4346 }
4347
4348 lck_mtx_test_init();
4349
4350 buffer_size = 2000;
4351 offset = 0;
4352 buffer = kalloc(buffer_size);
4353 if (!buffer)
4354 panic("Impossible to allocate memory for %s\n", __func__);
4355 memset(buffer, 0, buffer_size);
4356
4357 printf("%s starting uncontended mutex test with %d iterations\n", __func__, iter);
4358
4359 offset = snprintf(buffer, buffer_size, "STATS INNER LOOP");
4360 offset += lck_mtx_test_mtx_uncontended(iter, &buffer[offset], buffer_size - offset);
4361
4362 offset += snprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
4363 offset += lck_mtx_test_mtx_uncontended_loop_time(iter, &buffer[offset], buffer_size - offset);
4364
4365 error = SYSCTL_OUT(req, buffer, offset);
4366
4367 kfree(buffer, buffer_size);
4368 return error;
4369 }
4370
4371 static int
4372 sysctl_test_mtx_contended SYSCTL_HANDLER_ARGS
4373 {
4374 #pragma unused(oidp, arg1, arg2)
4375 char* buffer;
4376 int buffer_size, offset, error, iter;
4377 char input_val[40];
4378
4379 printf("%s called\n", __func__);
4380
4381 if (!req->newptr) {
4382 return 0;
4383 }
4384
4385 if (!req->oldptr) {
4386 return EINVAL;
4387 }
4388
4389 if (req->newlen >= sizeof(input_val)) {
4390 return EINVAL;
4391 }
4392
4393 error = SYSCTL_IN(req, input_val, req->newlen);
4394 if (error) {
4395 return error;
4396 }
4397 input_val[req->newlen] = '\0';
4398
4399 sscanf(input_val, "%d", &iter);
4400
4401 if (iter <= 0) {
4402 printf("%s requested %d iterations, not starting the test\n", __func__, iter);
4403 return EINVAL;
4404 }
4405
4406 lck_mtx_test_init();
4407
4408 erase_all_test_mtx_stats();
4409
4410 buffer_size = 1000;
4411 offset = 0;
4412 buffer = kalloc(buffer_size);
4413 if (!buffer)
4414 panic("Impossible to allocate memory for %s\n", __func__);
4415 memset(buffer, 0, buffer_size);
4416
4417 printf("%s starting contended mutex test with %d iterations\n", __func__, iter);
4418
4419 offset = snprintf(buffer, buffer_size, "STATS INNER LOOP");
4420 offset += lck_mtx_test_mtx_contended(iter, &buffer[offset], buffer_size - offset);
4421
4422 printf("%s starting contended mutex loop test with %d iterations\n", __func__, iter);
4423
4424 offset += snprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
4425 offset += lck_mtx_test_mtx_contended_loop_time(iter, &buffer[offset], buffer_size - offset);
4426
4427 error = SYSCTL_OUT(req, buffer, offset);
4428
4429 kfree(buffer, buffer_size);
4430
4431 return error;
4432 }
4433
/* Registration of the lck_mtx test nodes defined above. */
SYSCTL_PROC(_kern, OID_AUTO, lck_mtx_test_lock, CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
	0, 0, sysctl_lck_mtx_test_lock, "I", "lck mtx test lock");

SYSCTL_PROC(_kern, OID_AUTO, lck_mtx_test_unlock, CTLFLAG_WR | CTLFLAG_MASKED |CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
	0, 0, sysctl_lck_mtx_test_unlock, "I", "lck mtx test unlock");

SYSCTL_PROC(_kern, OID_AUTO, erase_all_test_mtx_stats, CTLFLAG_WR | CTLFLAG_MASKED |CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
	0, 0, sysctl_erase_all_test_mtx_stats, "I", "erase test_mtx statistics");

SYSCTL_PROC(_kern, OID_AUTO, get_test_mtx_stats, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED| CTLFLAG_KERN | CTLFLAG_LOCKED,
	0, 0, sysctl_get_test_mtx_stats, "A", "get test_mtx statistics");

SYSCTL_PROC(_kern, OID_AUTO, test_mtx_contended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	0, 0, sysctl_test_mtx_contended, "A", "get statistics for contended mtx test");

SYSCTL_PROC(_kern, OID_AUTO, test_mtx_uncontended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	0, 0, sysctl_test_mtx_uncontended, "A", "get statistics for uncontended mtx test");
4451
#if defined (__x86_64__)

/* Handshake semaphore between the test_panic_with_thread handler and the
 * child thread it spawns. */
semaphore_t sysctl_test_panic_with_thread_sem;

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winfinite-recursion" /* rdar://38801963 */
/* Child thread body for the "spin" variant: recurses a few times to build a
 * deeper backtrace, signals readiness, then spins forever so the parent can
 * panic while backtracing it. */
__attribute__((noreturn))
static void
panic_thread_test_child_spin(void * arg, wait_result_t wres)
{
	static int panic_thread_recurse_count = 5;

	if (panic_thread_recurse_count > 0) {
		panic_thread_recurse_count--;
		panic_thread_test_child_spin(arg, wres);
	}

	semaphore_signal(sysctl_test_panic_with_thread_sem);
	while (1) { ; }
}
#pragma clang diagnostic pop
4473
/* Child thread body for the "continuation" variant: signals readiness and
 * blocks uninterruptibly on a stack-local event with itself as the
 * continuation, so it parks without a meaningful kernel stack. */
static void
panic_thread_test_child_park(void * arg __unused, wait_result_t wres __unused)
{
	int event;

	assert_wait(&event, THREAD_UNINT);
	semaphore_signal(sysctl_test_panic_with_thread_sem);
	thread_block(panic_thread_test_child_park);
}
4483
4484 static int
4485 sysctl_test_panic_with_thread SYSCTL_HANDLER_ARGS
4486 {
4487 #pragma unused(arg1, arg2)
4488 int rval = 0;
4489 char str[16] = { '\0' };
4490 thread_t child_thread = THREAD_NULL;
4491
4492 rval = sysctl_handle_string(oidp, str, sizeof(str), req);
4493 if (rval != 0 || !req->newptr) {
4494 return EINVAL;
4495 }
4496
4497 semaphore_create(kernel_task, &sysctl_test_panic_with_thread_sem, SYNC_POLICY_FIFO, 0);
4498
4499 /* Create thread to spin or park in continuation */
4500 if (strncmp("spin", str, strlen("spin")) == 0) {
4501 if (kernel_thread_start(panic_thread_test_child_spin, NULL, &child_thread) != KERN_SUCCESS) {
4502 semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
4503 return EBUSY;
4504 }
4505 } else if (strncmp("continuation", str, strlen("continuation")) == 0) {
4506 if (kernel_thread_start(panic_thread_test_child_park, NULL, &child_thread) != KERN_SUCCESS) {
4507 semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
4508 return EBUSY;
4509 }
4510 } else {
4511 semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
4512 return EINVAL;
4513 }
4514
4515 semaphore_wait(sysctl_test_panic_with_thread_sem);
4516
4517 panic_with_thread_context(0, NULL, 0, child_thread, "testing panic_with_thread_context for thread %p", child_thread);
4518
4519 /* Not reached */
4520 return EINVAL;
4521 }
4522
/* kern.test_panic_with_thread: write-only trigger for the handler above. */
SYSCTL_PROC(_kern, OID_AUTO, test_panic_with_thread, CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_WR | CTLTYPE_STRING,
	0, 0, sysctl_test_panic_with_thread, "A", "test panic flow for backtracing a different thread");
4525 #endif /* defined (__x86_64__) */
4526 #endif /* DEVELOPMENT || DEBUG */