]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_sysctl.c
63bcc0443e4d26625ecf997e432aa56b1f216eaf
[apple/xnu.git] / bsd / kern / kern_sysctl.c
1 /*
2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
66 /*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
72
73 /*
74 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
84 */
85
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
95 #include <sys/buf.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
98 #include <sys/tty.h>
99 #include <sys/disklabel.h>
100 #include <sys/vm.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105 #include <sys/memory_maintenance.h>
106 #include <sys/priv.h>
107 #include <stdatomic.h>
108
109 #include <security/audit/audit.h>
110 #include <kern/kalloc.h>
111
112 #include <machine/smp.h>
113 #include <mach/machine.h>
114 #include <mach/mach_host.h>
115 #include <mach/mach_types.h>
116 #include <mach/processor_info.h>
117 #include <mach/vm_param.h>
118 #include <kern/debug.h>
119 #include <kern/mach_param.h>
120 #include <kern/task.h>
121 #include <kern/thread.h>
122 #include <kern/thread_group.h>
123 #include <kern/processor.h>
124 #include <kern/cpu_number.h>
125 #include <kern/debug.h>
126 #include <kern/sched_prim.h>
127 #include <vm/vm_kern.h>
128 #include <vm/vm_map.h>
129 #include <mach/host_info.h>
130
131 #include <sys/mount_internal.h>
132 #include <sys/kdebug.h>
133
134 #include <IOKit/IOPlatformExpert.h>
135 #include <pexpert/pexpert.h>
136
137 #include <machine/machine_routines.h>
138 #include <machine/exec.h>
139
140 #include <vm/vm_protos.h>
141 #include <vm/vm_pageout.h>
142 #include <vm/vm_compressor_algorithms.h>
143 #include <sys/imgsrc.h>
144 #include <kern/timer_call.h>
145
146 #if defined(__i386__) || defined(__x86_64__)
147 #include <i386/cpuid.h>
148 #endif
149
150 #if CONFIG_FREEZE
151 #include <sys/kern_memorystatus.h>
152 #endif
153
154 #if KPERF
155 #include <kperf/kperf.h>
156 #endif
157
158 #if HYPERVISOR
159 #include <kern/hv_support.h>
160 #endif
161
162 /*
163 * deliberately setting max requests to really high number
164 * so that runaway settings do not cause MALLOC overflows
165 */
166 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
167
168 extern int aio_max_requests;
169 extern int aio_max_requests_per_process;
170 extern int aio_worker_threads;
171 extern int lowpri_IO_window_msecs;
172 extern int lowpri_IO_delay_msecs;
173 extern int nx_enabled;
174 extern int speculative_reads_disabled;
175 extern unsigned int speculative_prefetch_max;
176 extern unsigned int speculative_prefetch_max_iosize;
177 extern unsigned int preheat_max_bytes;
178 extern unsigned int preheat_min_bytes;
179 extern long numvnodes;
180
181 extern uuid_string_t bootsessionuuid_string;
182
183 extern unsigned int vm_max_delayed_work_limit;
184 extern unsigned int vm_max_batch;
185
186 extern unsigned int vm_page_free_min;
187 extern unsigned int vm_page_free_target;
188 extern unsigned int vm_page_free_reserved;
189 extern unsigned int vm_page_speculative_percentage;
190 extern unsigned int vm_page_speculative_q_age_ms;
191
192 #if (DEVELOPMENT || DEBUG)
193 extern uint32_t vm_page_creation_throttled_hard;
194 extern uint32_t vm_page_creation_throttled_soft;
195 #endif /* DEVELOPMENT || DEBUG */
196
197 /*
198 * Conditionally allow dtrace to see these functions for debugging purposes.
199 */
200 #ifdef STATIC
201 #undef STATIC
202 #endif
203 #if 0
204 #define STATIC
205 #else
206 #define STATIC static
207 #endif
208
209 extern boolean_t mach_timer_coalescing_enabled;
210
211 extern uint64_t timer_deadline_tracking_bin_1, timer_deadline_tracking_bin_2;
212
213 STATIC void
214 fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
215 STATIC void
216 fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
217 STATIC void
218 fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
219 STATIC void
220 fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
221 STATIC void
222 fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
223 STATIC void
224 fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);
225
226 extern int
227 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
228 #if NFSCLIENT
229 extern int
230 netboot_root(void);
231 #endif
232 int
233 pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
234 proc_t p);
235 int
236 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
237 size_t *sizep, proc_t cur_proc);
238 STATIC int
239 sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
240 proc_t cur_proc, int argc_yes);
241 int
242 sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
243 size_t newlen, void *sp, int len);
244
245 STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
246 STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
247 STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
248 STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
249 STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
250 int sysdoproc_callback(proc_t p, void *arg);
251
252
253 /* forward declarations for non-static STATIC */
254 STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
255 STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
256 STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
257 STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
258 STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
259 STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
260 #if COUNT_SYSCALLS
261 STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
262 #endif /* COUNT_SYSCALLS */
263 #if !CONFIG_EMBEDDED
264 STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
265 #endif /* !CONFIG_EMBEDDED */
266 STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
267 STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
268 STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
269 STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
270 STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
271 STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
272 STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
273 STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
274 STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
275 STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
276 STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
277 STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
278 STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
279 STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
280 STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
281 #if NFSCLIENT
282 STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
283 #endif
284 #ifdef CONFIG_IMGSRC_ACCESS
285 STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
286 #endif
287 STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
288 STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
289 #if CONFIG_COREDUMP
290 STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
291 STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
292 #endif
293 STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
294 STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
295 STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
296 STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
297 STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
298 STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
299 STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
300 STATIC int fetch_process_cputype( proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
301 STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
302 STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
303 STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
304 STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
305 STATIC int sysctl_minimalboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
306 STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
307
308
309 extern void IORegistrySetOSBuildVersion(char * build_version);
310
311 STATIC void
312 fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
313 {
314 la64->ldavg[0] = la->ldavg[0];
315 la64->ldavg[1] = la->ldavg[1];
316 la64->ldavg[2] = la->ldavg[2];
317 la64->fscale = (user64_long_t)la->fscale;
318 }
319
320 STATIC void
321 fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
322 {
323 la32->ldavg[0] = la->ldavg[0];
324 la32->ldavg[1] = la->ldavg[1];
325 la32->ldavg[2] = la->ldavg[2];
326 la32->fscale = (user32_long_t)la->fscale;
327 }
328
329 #if CONFIG_COREDUMP
330 /*
331 * Attributes stored in the kernel.
332 */
333 extern char corefilename[MAXPATHLEN+1];
334 extern int do_coredump;
335 extern int sugid_coredump;
336 #endif
337
338 #if COUNT_SYSCALLS
339 extern int do_count_syscalls;
340 #endif
341
342 #ifdef INSECURE
343 int securelevel = -1;
344 #else
345 int securelevel;
346 #endif
347
/*
 * kern.threadname: get or set the pthread name of the *current* thread.
 *
 * Read: copies out the current thread name (no NUL terminator) and
 * reports its strlen-style length via req->oldidx; if no name is set
 * yet, reports MAXTHREADNAMESIZE - 1 so callers size their buffers.
 * Write: accepts up to MAXTHREADNAMESIZE - 1 bytes, allocating the
 * per-uthread name buffer on first use.
 *
 * Returns 0 on success, ENOMEM if the user buffer or kalloc is too
 * small/fails, ENAMETOOLONG for an oversized new name, or a
 * copyin/copyout error.
 */
STATIC int
sysctl_handle_kern_threadname(	__unused struct sysctl_oid *oidp, __unused void *arg1,
		__unused int arg2, struct sysctl_req *req)
{
	int error;
	/* Operates only on the calling thread's uthread, so no proc lock is taken. */
	struct uthread *ut = get_bsdthread_info(current_thread());
	user_addr_t oldp=0, newp=0;
	size_t *oldlenp=NULL;
	size_t newlen=0;

	oldp = req->oldptr;
	oldlenp = &(req->oldlen);
	newp = req->newptr;
	newlen = req->newlen;

	/* We want the current length, and maybe the string itself */
	if(oldlenp) {
		/* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
		size_t currlen = MAXTHREADNAMESIZE - 1;

		if(ut->pth_name)
			/* use length of current thread name */
			currlen = strlen(ut->pth_name);
		if(oldp) {
			if(*oldlenp < currlen)
				return ENOMEM;
			/* NOTE - we do not copy the NULL terminator */
			if(ut->pth_name) {
				error = copyout(ut->pth_name,oldp,currlen);
				if(error)
					return error;
			}
		}
		/* return length of thread name minus NULL terminator (just like strlen)  */
		req->oldidx = currlen;
	}

	/* We want to set the name to something */
	if(newp)
	{
		if(newlen > (MAXTHREADNAMESIZE - 1))
			return ENAMETOOLONG;
		if(!ut->pth_name)
		{
			/* First name set on this thread: allocate the buffer lazily. */
			ut->pth_name = (char*)kalloc( MAXTHREADNAMESIZE );
			if(!ut->pth_name)
				return ENOMEM;
		} else {
			/* Trace the old name before it is overwritten. */
			kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, ut->pth_name);
		}
		/* Zero-fill first so the copied-in bytes are always NUL-terminated. */
		bzero(ut->pth_name, MAXTHREADNAMESIZE);
		/* NOTE(review): on copyin failure the name is left cleared, not
		 * restored — presumably acceptable since only this thread uses it;
		 * confirm no other reader races with this buffer. */
		error = copyin(newp, ut->pth_name, newlen);
		if (error) {
			return error;
		}

		kernel_debug_string_simple(TRACE_STRING_THREADNAME, ut->pth_name);
	}

	return 0;
}
409
410 SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname,"A","");
411
412 #define BSD_HOST 1
413 STATIC int
414 sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
415 {
416 host_basic_info_data_t hinfo;
417 kern_return_t kret;
418 uint32_t size;
419 int changed;
420 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
421 struct _processor_statistics_np *buf;
422 int error;
423
424 kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
425 if (kret != KERN_SUCCESS) {
426 return EINVAL;
427 }
428
429 size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */
430
431 if (req->oldlen < size) {
432 return EINVAL;
433 }
434
435 MALLOC(buf, struct _processor_statistics_np*, size, M_TEMP, M_ZERO | M_WAITOK);
436
437 kret = get_sched_statistics(buf, &size);
438 if (kret != KERN_SUCCESS) {
439 error = EINVAL;
440 goto out;
441 }
442
443 error = sysctl_io_opaque(req, buf, size, &changed);
444 if (error) {
445 goto out;
446 }
447
448 if (changed) {
449 panic("Sched info changed?!");
450 }
451 out:
452 FREE(buf, M_TEMP);
453 return error;
454 }
455
456 SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");
457
458 STATIC int
459 sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
460 {
461 boolean_t active;
462 int res;
463
464 if (req->newlen != sizeof(active)) {
465 return EINVAL;
466 }
467
468 res = copyin(req->newptr, &active, sizeof(active));
469 if (res != 0) {
470 return res;
471 }
472
473 return set_sched_stats_active(active);
474 }
475
476 SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
477
478 extern uint32_t sched_debug_flags;
479 SYSCTL_INT(_debug, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_debug_flags, 0, "scheduler debug");
480
481 #if (DEBUG || DEVELOPMENT)
482 extern boolean_t doprnt_hide_pointers;
483 SYSCTL_INT(_debug, OID_AUTO, hide_kernel_pointers, CTLFLAG_RW | CTLFLAG_LOCKED, &doprnt_hide_pointers, 0, "hide kernel pointers from log");
484 #endif
485
486 extern int get_kernel_symfile(proc_t, char **);
487
488 #if COUNT_SYSCALLS
489 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
490
491 extern unsigned int nsysent;
492 extern int syscalls_log[];
493 extern const char *syscallnames[];
494
495 STATIC int
496 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
497 {
498 __unused int cmd = oidp->oid_arg2; /* subcommand*/
499 __unused int *name = arg1; /* oid element argument vector */
500 __unused int namelen = arg2; /* number of oid element arguments */
501 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
502 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
503 user_addr_t newp = req->newptr; /* user buffer copy in address */
504 size_t newlen = req->newlen; /* user buffer copy in size */
505 int error;
506
507 int tmp;
508
509 /* valid values passed in:
510 * = 0 means don't keep called counts for each bsd syscall
511 * > 0 means keep called counts for each bsd syscall
512 * = 2 means dump current counts to the system log
513 * = 3 means reset all counts
514 * for example, to dump current counts:
515 * sysctl -w kern.count_calls=2
516 */
517 error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
518 if ( error != 0 ) {
519 return (error);
520 }
521
522 if ( tmp == 1 ) {
523 do_count_syscalls = 1;
524 }
525 else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
526 int i;
527 for ( i = 0; i < nsysent; i++ ) {
528 if ( syscalls_log[i] != 0 ) {
529 if ( tmp == 2 ) {
530 printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
531 }
532 else {
533 syscalls_log[i] = 0;
534 }
535 }
536 }
537 if ( tmp != 0 ) {
538 do_count_syscalls = 1;
539 }
540 }
541
542 /* adjust index so we return the right required/consumed amount */
543 if (!error)
544 req->oldidx += req->oldlen;
545
546 return (error);
547 }
548 SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
549 0, /* Pointer argument (arg1) */
550 0, /* Integer argument (arg2) */
551 sysctl_docountsyscalls, /* Handler function */
552 NULL, /* Data pointer */
553 "");
554 #endif /* COUNT_SYSCALLS */
555
556 /*
557 * The following sysctl_* functions should not be used
558 * any more, as they can only cope with callers in
559 * user mode: Use new-style
560 * sysctl_io_number()
561 * sysctl_io_string()
562 * sysctl_io_opaque()
563 * instead.
564 */
565
566 /*
567 * Validate parameters and get old / set new parameters
568 * for an integer-valued sysctl function.
569 */
570 int
571 sysctl_int(user_addr_t oldp, size_t *oldlenp,
572 user_addr_t newp, size_t newlen, int *valp)
573 {
574 int error = 0;
575
576 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
577 return (EFAULT);
578 if (oldp && *oldlenp < sizeof(int))
579 return (ENOMEM);
580 if (newp && newlen != sizeof(int))
581 return (EINVAL);
582 *oldlenp = sizeof(int);
583 if (oldp)
584 error = copyout(valp, oldp, sizeof(int));
585 if (error == 0 && newp) {
586 error = copyin(newp, valp, sizeof(int));
587 AUDIT_ARG(value32, *valp);
588 }
589 return (error);
590 }
591
592 /*
593 * Validate parameters and get old / set new parameters
594 * for an quad(64bit)-valued sysctl function.
595 */
596 int
597 sysctl_quad(user_addr_t oldp, size_t *oldlenp,
598 user_addr_t newp, size_t newlen, quad_t *valp)
599 {
600 int error = 0;
601
602 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
603 return (EFAULT);
604 if (oldp && *oldlenp < sizeof(quad_t))
605 return (ENOMEM);
606 if (newp && newlen != sizeof(quad_t))
607 return (EINVAL);
608 *oldlenp = sizeof(quad_t);
609 if (oldp)
610 error = copyout(valp, oldp, sizeof(quad_t));
611 if (error == 0 && newp)
612 error = copyin(newp, valp, sizeof(quad_t));
613 return (error);
614 }
615
616 STATIC int
617 sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
618 {
619 if (p->p_pid != (pid_t)*(int*)arg)
620 return(0);
621 else
622 return(1);
623 }
624
625 STATIC int
626 sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
627 {
628 if (p->p_pgrpid != (pid_t)*(int*)arg)
629 return(0);
630 else
631 return(1);
632 }
633
634 STATIC int
635 sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
636 {
637 int retval;
638 struct tty *tp;
639
640 /* This is very racy but list lock is held.. Hmmm. */
641 if ((p->p_flag & P_CONTROLT) == 0 ||
642 (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
643 (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
644 tp->t_dev != (dev_t)*(int*)arg)
645 retval = 0;
646 else
647 retval = 1;
648
649 return(retval);
650 }
651
652 STATIC int
653 sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
654 {
655 kauth_cred_t my_cred;
656 uid_t uid;
657
658 if (p->p_ucred == NULL)
659 return(0);
660 my_cred = kauth_cred_proc_ref(p);
661 uid = kauth_cred_getuid(my_cred);
662 kauth_cred_unref(&my_cred);
663
664 if (uid != (uid_t)*(int*)arg)
665 return(0);
666 else
667 return(1);
668 }
669
670
671 STATIC int
672 sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
673 {
674 kauth_cred_t my_cred;
675 uid_t ruid;
676
677 if (p->p_ucred == NULL)
678 return(0);
679 my_cred = kauth_cred_proc_ref(p);
680 ruid = kauth_cred_getruid(my_cred);
681 kauth_cred_unref(&my_cred);
682
683 if (ruid != (uid_t)*(int*)arg)
684 return(0);
685 else
686 return(1);
687 }
688
689 /*
690 * try over estimating by 5 procs
691 */
692 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
/* Shared state for the kern.proc sysctl iteration (see sysdoproc_callback). */
struct sysdoproc_args {
	int	buflen;		/* bytes remaining in the user buffer */
	void	*kprocp;	/* scratch kinfo_proc in the chosen layout */
	boolean_t is_64_bit;	/* TRUE: fill user64 layout, else user32 */
	user_addr_t	dp;	/* current copyout position in the user buffer */
	size_t needed;		/* running total of bytes required for all matches */
	int sizeof_kproc;	/* size of one record in the chosen layout */
	int *errorp;		/* out: first copyout error encountered */
	int uidcheck;		/* nonzero: filter by effective uid */
	int ruidcheck;		/* nonzero: filter by real uid */
	int ttycheck;		/* nonzero: filter by controlling tty device */
	int uidval;		/* value compared by whichever check is enabled */
};
706
/*
 * proc_iterate() callback: copy one process record out to the user
 * buffer if it passes the optional uid/ruid/tty filter and there is
 * room left, and account for its size in args->needed so the caller
 * can report the required buffer length.
 */
int
sysdoproc_callback(proc_t p, void *arg)
{
	struct sysdoproc_args *args = arg;

	/* Only filter/copy while the buffer still has room for a full record. */
	if (args->buflen >= args->sizeof_kproc) {
		if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0))
			return (PROC_RETURNED);
		if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0))
			return (PROC_RETURNED);
		if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0))
			return (PROC_RETURNED);

		/* Zero first: the fill_user*_proc routines set only non-zero fields. */
		bzero(args->kprocp, args->sizeof_kproc);
		if (args->is_64_bit)
			fill_user64_proc(p, args->kprocp);
		else
			fill_user32_proc(p, args->kprocp);
		int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
		if (error) {
			/* Record the error and abort the iteration. */
			*args->errorp = error;
			return (PROC_RETURNED_DONE);
		}
		args->dp += args->sizeof_kproc;
		args->buflen -= args->sizeof_kproc;
	}
	/*
	 * Counted even when the buffer is full (filters are skipped then),
	 * so 'needed' over-estimates and the caller reports ENOMEM.
	 */
	args->needed += args->sizeof_kproc;
	return (PROC_RETURNED);
}
736
737 SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
/*
 * Common handler for the kern.proc.* nodes (all/pid/pgrp/tty/uid/ruid).
 *
 * The subcommand comes in via oidp->oid_arg2; name[0] (when present)
 * carries the pid/pgid/dev/uid value to match.  Iterates the allproc
 * and zombie lists, copying one 32- or 64-bit kinfo_proc per matching
 * process into req->oldptr.  With a NULL oldptr, only computes the
 * buffer size required (plus slop) and reports it via req->oldlen.
 *
 * Returns EINVAL for a malformed name vector, ENOTSUP for an unknown
 * subcommand, ENOMEM if the iteration failed or the buffer was too
 * small, or the first copyout error.
 */
STATIC int
sysctl_prochandle SYSCTL_HANDLER_ARGS
{
	int cmd = oidp->oid_arg2;	/* subcommand for multiple nodes */
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t where = req->oldptr;/* user buffer copy out address */

	user_addr_t dp = where;
	size_t needed = 0;
	int buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
	int error = 0;
	boolean_t is_64_bit = proc_is64bit(current_proc());
	struct user32_kinfo_proc  user32_kproc;
	struct user64_kinfo_proc  user_kproc;
	int sizeof_kproc;
	void *kprocp;
	int (*filterfn)(proc_t, void *) = 0;
	struct sysdoproc_args args;
	int uidcheck = 0;
	int ruidcheck = 0;
	int ttycheck = 0;
	int success = 0;

	/* KERN_PROC_ALL takes no argument; every other subcommand takes one. */
	if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL))
		return (EINVAL);

	/* Pick the record layout matching the caller's address-space size. */
	if (is_64_bit) {
		sizeof_kproc = sizeof(user_kproc);
		kprocp = &user_kproc;
	} else {
		sizeof_kproc = sizeof(user32_kproc);
		kprocp = &user32_kproc;
	}

	/*
	 * PID/PGRP are applied as proc_iterate() filters; TTY/UID/RUID are
	 * checked per-process inside sysdoproc_callback() instead.
	 */
	switch (cmd) {

	case KERN_PROC_PID:
		filterfn = sysdoproc_filt_KERN_PROC_PID;
		break;

	case KERN_PROC_PGRP:
		filterfn = sysdoproc_filt_KERN_PROC_PGRP;
		break;

	case KERN_PROC_TTY:
		ttycheck = 1;
		break;

	case KERN_PROC_UID:
		uidcheck = 1;
		break;

	case KERN_PROC_RUID:
		ruidcheck = 1;
		break;

	case KERN_PROC_ALL:
		break;

	default:
		/* must be kern.proc.<unknown> */
		return (ENOTSUP);
	}

	error = 0;
	args.buflen = buflen;
	args.kprocp = kprocp;
	args.is_64_bit = is_64_bit;
	args.dp = dp;
	args.needed = needed;
	args.errorp = &error;
	args.uidcheck = uidcheck;
	args.ruidcheck = ruidcheck;
	args.ttycheck = ttycheck;
	args.sizeof_kproc = sizeof_kproc;
	if (namelen)
		args.uidval = name[0];

	success = proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
	    sysdoproc_callback, &args, filterfn, name);

	/*
	 * rdar://problem/28433391: if we can't iterate over the processes,
	 * make sure to return an error.
	 */

	if (success != 0)
		return (ENOMEM);

	if (error)
		return (error);

	dp = args.dp;
	needed = args.needed;

	if (where != USER_ADDR_NULL) {
		/* Report how much was actually written; ENOMEM if we ran out. */
		req->oldlen = dp - where;
		if (needed > req->oldlen)
			return (ENOMEM);
	} else {
		/* Size-probe: over-estimate so a follow-up call has headroom. */
		needed += KERN_PROCSLOP;
		req->oldlen = needed;
	}
	/* adjust index so we return the right required/consumed amount */
	req->oldidx += req->oldlen;
	return (0);
}
846
847 /*
848 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
849 * in the sysctl declaration itself, which comes into the handler function
850 * as 'oidp->oid_arg2'.
851 *
852 * For these particular sysctls, since they have well known OIDs, we could
853 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
854 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
855 * of a well known value with a common handler function. This is desirable,
856 * because we want well known values to "go away" at some future date.
857 *
858 * It should be noted that the value of '((int *)arg1)[1]' is used for many
859 * an integer parameter to the subcommand for many of these sysctls; we'd
860 * rather have used '((int *)arg1)[0]' for that, or even better, an element
861 * in a structure passed in as the the 'newp' argument to sysctlbyname(3),
862 * and then use leaf-node permissions enforcement, but that would have
863 * necessitated modifying user space code to correspond to the interface
864 * change, and we are striving for binary backward compatibility here; even
865 * though these are SPI, and not intended for use by user space applications
866 * which are not themselves system tools or libraries, some applications
867 * have erroneously used them.
868 */
869 SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
870 0, /* Pointer argument (arg1) */
871 KERN_PROC_ALL, /* Integer argument (arg2) */
872 sysctl_prochandle, /* Handler function */
873 NULL, /* Data is size variant on ILP32/LP64 */
874 "");
875 SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
876 0, /* Pointer argument (arg1) */
877 KERN_PROC_PID, /* Integer argument (arg2) */
878 sysctl_prochandle, /* Handler function */
879 NULL, /* Data is size variant on ILP32/LP64 */
880 "");
881 SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
882 0, /* Pointer argument (arg1) */
883 KERN_PROC_TTY, /* Integer argument (arg2) */
884 sysctl_prochandle, /* Handler function */
885 NULL, /* Data is size variant on ILP32/LP64 */
886 "");
887 SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
888 0, /* Pointer argument (arg1) */
889 KERN_PROC_PGRP, /* Integer argument (arg2) */
890 sysctl_prochandle, /* Handler function */
891 NULL, /* Data is size variant on ILP32/LP64 */
892 "");
893 SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
894 0, /* Pointer argument (arg1) */
895 KERN_PROC_UID, /* Integer argument (arg2) */
896 sysctl_prochandle, /* Handler function */
897 NULL, /* Data is size variant on ILP32/LP64 */
898 "");
899 SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
900 0, /* Pointer argument (arg1) */
901 KERN_PROC_RUID, /* Integer argument (arg2) */
902 sysctl_prochandle, /* Handler function */
903 NULL, /* Data is size variant on ILP32/LP64 */
904 "");
905 SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
906 0, /* Pointer argument (arg1) */
907 KERN_PROC_LCID, /* Integer argument (arg2) */
908 sysctl_prochandle, /* Handler function */
909 NULL, /* Data is size variant on ILP32/LP64 */
910 "");
911
912
913 /*
914 * Fill in non-zero fields of an eproc structure for the specified process.
915 */
/*
 * Fill in non-zero fields of an eproc structure for the specified process.
 *
 * Caller must have zeroed *ep beforehand (see sysdoproc_callback);
 * only fields with meaningful values are written here.  Takes and
 * releases its own pgrp/session/credential references, so no locks
 * need be held by the caller beyond what proc_pgrp()/proc_session()
 * require.
 */
STATIC void
fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
{
	struct tty *tp;
	struct pgrp *pg;
	struct session *sessp;
	kauth_cred_t my_cred;

	pg = proc_pgrp(p);
	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	}
	ep->e_ppid = p->p_ppid;
	if (p->p_ucred) {
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
		ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
		ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
		ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
		bcopy(posix_cred_get(my_cred)->cr_groups,
			ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));

		kauth_cred_unref(&my_cred);
	}

	/* Controlling terminal info only when the process has one. */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	     (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
	} else
		ep->e_tdev = NODEV;

	/* Drop the session/pgrp references taken above. */
	if (sessp != SESSION_NULL) {
		if (SESS_LEADER(p, sessp))
			ep->e_flag |= EPROC_SLEADER;
		session_rele(sessp);
	}
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
968
969 /*
970 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
971 */
/*
 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
 *
 * Mirrors fill_user32_eproc except for the user64_eproc layout; takes and
 * drops the same pgrp/session/credential references.  Caller is expected
 * to have zeroed *ep.
 */
STATIC void
fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
{
	struct tty *tp;
	struct pgrp *pg;
	struct session *sessp;
	kauth_cred_t my_cred;

	pg = proc_pgrp(p);
	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		/* session has a controlling tty vnode -> mark controlling tty */
		if (sessp != SESSION_NULL && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	}
	ep->e_ppid = p->p_ppid;
	if (p->p_ucred) {
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
		ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
		ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
		ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
		bcopy(posix_cred_get(my_cred)->cr_groups,
			ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));

		kauth_cred_unref(&my_cred);
	}

	/* controlling terminal device and its foreground pgrp, if any */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	    (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
	} else
		ep->e_tdev = NODEV;

	if (sessp != SESSION_NULL) {
		if (SESS_LEADER(p, sessp))
			ep->e_flag |= EPROC_SLEADER;
		session_rele(sessp);
	}
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
1024
1025 /*
1026 * Fill in an eproc structure for the specified process.
1027 * bzeroed by our caller, so only set non-zero fields.
1028 */
/*
 * Fill in an eproc structure for the specified process.
 * bzeroed by our caller, so only set non-zero fields.
 *
 * Copies the historical extern_proc fields into the ILP32 layout,
 * narrowing timer/time values to 32-bit fields, and synthesizes the
 * legacy p_flag bits (P_TRACED/P_PPWAIT/P_WEXIT) from p_lflag.
 */
STATIC void
fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
{
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_flag = p->p_flag;
	/* reconstruct historical flag bits for old kinfo_proc consumers */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	/* Mach related */
	exp->user_stack = p->user_stack;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
#endif
	/* timer values are explicitly narrowed to the 32-bit user layout */
	exp->p_realtimer.it_interval.tv_sec =
	    (user32_time_t)p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec =
	    (__int32_t)p->p_realtimer.it_interval.tv_usec;

	exp->p_realtimer.it_value.tv_sec =
	    (user32_time_t)p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec =
	    (__int32_t)p->p_realtimer.it_value.tv_usec;

	exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;

	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
}
1075
1076 /*
1077 * Fill in an LP64 version of extern_proc structure for the specified process.
1078 */
/*
 * Fill in an LP64 version of extern_proc structure for the specified process.
 *
 * Same field-by-field copy as fill_user32_externproc but without the
 * 32-bit narrowing casts; caller is expected to have zeroed *exp.
 */
STATIC void
fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
{
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_flag = p->p_flag;
	/* reconstruct historical flag bits for old kinfo_proc consumers */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	/* Mach related */
	exp->user_stack = p->user_stack;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
#endif
	exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;

	exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;

	exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = p->p_rtime.tv_usec;

	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
}
1121
/*
 * Assemble a complete user32_kinfo_proc (extern_proc + eproc) for p.
 * NOTE(review): assumes the caller zeroed *kp — the fill routines set
 * only non-zero fields.
 */
STATIC void
fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
{
	/* on a 64 bit kernel, 32 bit users get some truncated information */
	fill_user32_externproc(p, &kp->kp_proc);
	fill_user32_eproc(p, &kp->kp_eproc);
}
1129
/*
 * Assemble a complete user64_kinfo_proc (extern_proc + eproc) for p.
 * NOTE(review): assumes the caller zeroed *kp — the fill routines set
 * only non-zero fields.
 */
STATIC void
fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
{
	fill_user64_externproc(p, &kp->kp_proc);
	fill_user64_eproc(p, &kp->kp_eproc);
}
1136
/*
 * kern.kdebug handler: validates that name[0] is a known KERN_KD*
 * subcommand and forwards the request to kdbg_control(), which performs
 * the trace-buffer operation and updates *oldlenp with the amount of
 * data produced/required.
 */
STATIC int
sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
//	size_t newlen = req->newlen;	/* user buffer copy in size */

	int ret=0;

	if (namelen == 0)
		return(ENOTSUP);

	switch(name[0]) {
	case KERN_KDEFLAGS:
	case KERN_KDDFLAGS:
	case KERN_KDENABLE:
	case KERN_KDGETBUF:
	case KERN_KDSETUP:
	case KERN_KDREMOVE:
	case KERN_KDSETREG:
	case KERN_KDGETREG:
	case KERN_KDREADTR:
	case KERN_KDWRITETR:
	case KERN_KDWRITEMAP:
	case KERN_KDTEST:
	case KERN_KDPIDTR:
	case KERN_KDTHRMAP:
	case KERN_KDPIDEX:
	case KERN_KDSETBUF:
	case KERN_KDREADCURTHRMAP:
	case KERN_KDSET_TYPEFILTER:
	case KERN_KDBUFWAIT:
	case KERN_KDCPUMAP:
	case KERN_KDWRITEMAP_V3:
	case KERN_KDWRITETR_V3:
		/* all recognized subcommands funnel into kdbg_control */
		ret = kdbg_control(name, namelen, oldp, oldlenp);
		break;
	default:
		ret= ENOTSUP;
		break;
	}

	/* adjust index so we return the right required/consumed amount */
	if (!ret)
		req->oldidx += req->oldlen;

	return (ret);
}
SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_kdebug_ops,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1195
1196
1197 #if !CONFIG_EMBEDDED
1198 /*
1199 * Return the top *sizep bytes of the user stack, or the entire area of the
1200 * user stack down through the saved exec_path, whichever is smaller.
1201 */
/*
 * kern.procargs handler (legacy variant): delegates to sysctl_procargsx
 * with argc_yes == 0, so the result contains the raw argument area plus
 * the reconstructed executable path but no leading argc word.
 */
STATIC int
sysctl_doprocargs SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
//	size_t newlen = req->newlen;	/* user buffer copy in size */
	int error;

	error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 0);

	/* adjust index so we return the right required/consumed amount */
	if (!error)
		req->oldidx += req->oldlen;

	return (error);
}
SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_doprocargs,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1228 #endif /* !CONFIG_EMBEDDED */
1229
/*
 * kern.procargs2 handler: delegates to sysctl_procargsx with
 * argc_yes == 1, so the copied-out buffer begins with the target
 * process's argc as an int, followed by the argument area.
 */
STATIC int
sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
//	size_t newlen = req->newlen;	/* user buffer copy in size */
	int error;

	error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 1);

	/* adjust index so we return the right required/consumed amount */
	if (!error)
		req->oldidx += req->oldlen;

	return (error);
}
SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_doprocargs2,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1256
/*
 * Common backend for KERN_PROCARGS/KERN_PROCARGS2.
 *
 * name[0] is the target pid.  If 'where' is NULL the caller is sizing its
 * buffer and we only return the required length in *sizep.  Otherwise the
 * top of the target's user stack (the saved argument area) is copied into
 * the kernel via the Mach VM copy interfaces and then copied out to
 * 'where', optionally prefixed with argc (argc_yes != 0).  Access is
 * limited to the owner uid or a superuser caller.
 *
 * NOTE(review): p_argslen/p_argc are sampled while holding a proc ref but
 * the target may exec concurrently; the comment below acknowledges stale
 * data is possible by design.
 */
STATIC int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
                 size_t *sizep, proc_t cur_proc, int argc_yes)
{
	proc_t p;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	int error = 0;
	struct _vm_map *proc_map;
	struct task * task;
	vm_map_copy_t tmp;
	user_addr_t arg_addr;
	size_t arg_size;
	caddr_t data;
	size_t argslen=0;
	int size;
	vm_offset_t copy_start, copy_end;
	kern_return_t ret;
	int pid;
	kauth_cred_t my_cred;
	uid_t uid;
	int argc = -1;

	if ( namelen < 1 )
		return(EINVAL);

	if (argc_yes)
		buflen -= sizeof(int);		/* reserve first word to return argc */

	/* we only care about buflen when where (oldp from sysctl) is not NULL. */
	/* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
	/* is not NULL then the caller wants us to return the length needed to */
	/* hold the data we would return */
	if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
		return(EINVAL);
	}
	arg_size = buflen;

	/*
	 * Lookup process by pid
	 */
	pid = name[0];
	p = proc_find(pid);
	if (p == NULL) {
		return(EINVAL);
	}

	/*
	 * Copy the top N bytes of the stack.
	 * On all machines we have so far, the stack grows
	 * downwards.
	 *
	 * If the user expects no more than N bytes of
	 * argument list, use that as a guess for the
	 * size.
	 */

	if (!p->user_stack) {
		proc_rele(p);
		return(EINVAL);
	}

	if (where == USER_ADDR_NULL) {
		/* caller only wants to know length of proc args data */
		if (sizep == NULL) {
			proc_rele(p);
			return(EFAULT);
		}

		size = p->p_argslen;
		proc_rele(p);
		if (argc_yes) {
			/* PROCARGS2 adds just the leading argc word */
			size += sizeof(int);
		} else {
			/*
			 * old PROCARGS will return the executable's path and plus some
			 * extra space for work alignment and data tags
			 */
			size += PATH_MAX + (6 * sizeof(int));
		}
		/* round the advertised size up to the next int boundary */
		size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
		*sizep = size;
		return (0);
	}

	/* permission check: owner uid or superuser only */
	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);

	if ((uid != kauth_cred_getuid(kauth_cred_get()))
		&& suser(kauth_cred_get(), &cur_proc->p_acflag)) {
		proc_rele(p);
		return (EINVAL);
	}

	if ((u_int)arg_size > p->p_argslen)
		arg_size = round_page(p->p_argslen);

	/* argument area sits at the top of the stack, growing downwards */
	arg_addr = p->user_stack - arg_size;

	/*
	 * Before we can block (any VM code), make another
	 * reference to the map to keep it alive.  We do
	 * that by getting a reference on the task itself.
	 */
	task = p->task;
	if (task == NULL) {
		proc_rele(p);
		return(EINVAL);
	}

	/* save off argc before releasing the proc */
	argc = p->p_argc;

	argslen = p->p_argslen;
	/*
	 * Once we have a task reference we can convert that into a
	 * map reference, which we will use in the calls below.  The
	 * task/process may change its map after we take this reference
	 * (see execve), but the worst that will happen then is a return
	 * of stale info (which is always a possibility).
	 */
	task_reference(task);
	proc_rele(p);
	proc_map = get_task_map_reference(task);
	task_deallocate(task);

	if (proc_map == NULL)
		return(EINVAL);


	ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size), VM_KERN_MEMORY_BSD);
	if (ret != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);
		return(ENOMEM);
	}

	copy_end = round_page(copy_start + arg_size);

	if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
			  (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
			vm_map_deallocate(proc_map);
			kmem_free(kernel_map, copy_start,
					round_page(arg_size));
			return (EIO);
	}

	/*
	 * Now that we've done the copyin from the process'
	 * map, we can release the reference to it.
	 */
	vm_map_deallocate(proc_map);

	if( vm_map_copy_overwrite(kernel_map,
				  (vm_map_address_t)copy_start,
				  tmp, FALSE) != KERN_SUCCESS) {
			kmem_free(kernel_map, copy_start,
					round_page(arg_size));
			vm_map_copy_discard(tmp);
			return (EIO);
	}

	/* the argument strings occupy the tail end of the copied pages */
	if (arg_size > argslen) {
		data = (caddr_t) (copy_end - argslen);
		size = argslen;
	} else {
		data = (caddr_t) (copy_end - arg_size);
		size = arg_size;
	}

	/*
	 * When these sysctls were introduced, the first string in the strings
	 * section was just the bare path of the executable.  However, for security
	 * reasons we now prefix this string with executable_path= so it can be
	 * parsed getenv style.  To avoid binary compatability issues with exising
	 * callers of this sysctl, we strip it off here if present.
	 * (rdar://problem/13746466)
	 */
#define EXECUTABLE_KEY "executable_path="
	if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0){
		data += strlen(EXECUTABLE_KEY);
		size -= strlen(EXECUTABLE_KEY);
	}

	if (argc_yes) {
		/* Put processes argc as the first word in the copyout buffer */
		suword(where, argc);
		error = copyout(data, (where + sizeof(int)), size);
		size += sizeof(int);
	} else {
		error = copyout(data, where, size);

		/*
		 * Make the old PROCARGS work to return the executable's path
		 * But, only if there is enough space in the provided buffer
		 *
		 * on entry: data [possibily] points to the beginning of the path
		 *
		 * Note: we keep all pointers&sizes aligned to word boundries
		 */
		if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
		{
			int binPath_sz, alignedBinPath_sz = 0;
			int extraSpaceNeeded, addThis;
			user_addr_t placeHere;
			char * str = (char *) data;
			int max_len = size;

			/* Some apps are really bad about messing up their stacks
			   So, we have to be extra careful about getting the length
			   of the executing binary.  If we encounter an error, we bail.
			*/

			/* Limit ourselves to PATH_MAX paths */
			if ( max_len > PATH_MAX ) max_len = PATH_MAX;

			binPath_sz = 0;

			while ( (binPath_sz < max_len-1) && (*str++ != 0) )
				binPath_sz++;

			/* If we have a NUL terminator, copy it, too */
			if (binPath_sz < max_len-1) binPath_sz += 1;

			/* Pre-Flight the space requiremnts */

			/* Account for the padding that fills out binPath to the next word */
			alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;

			placeHere = where + size;

			/* Account for the bytes needed to keep placeHere word aligned */
			addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;

			/* Add up all the space that is needed */
			extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

			/* is there is room to tack on argv[0]? */
			if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
			{
				placeHere += addThis;
				suword(placeHere, 0);
				placeHere += sizeof(int);
				suword(placeHere, 0xBFFF0000);
				placeHere += sizeof(int);
				suword(placeHere, 0);
				placeHere += sizeof(int);
				error = copyout(data, placeHere, binPath_sz);
				if ( ! error )
				{
					placeHere += binPath_sz;
					suword(placeHere, 0);
					size += extraSpaceNeeded;
				}
			}
		}
	}

	if (copy_start != (vm_offset_t) 0) {
		kmem_free(kernel_map, copy_start, copy_end - copy_start);
	}
	if (error) {
		return(error);
	}

	if (where != USER_ADDR_NULL)
		*sizep = size;
	return (0);
}
1525
1526
1527 /*
1528 * Max number of concurrent aio requests
1529 */
1530 STATIC int
1531 sysctl_aiomax
1532 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1533 {
1534 int new_value, changed;
1535 int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
1536 if (changed) {
1537 /* make sure the system-wide limit is greater than the per process limit */
1538 if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS)
1539 aio_max_requests = new_value;
1540 else
1541 error = EINVAL;
1542 }
1543 return(error);
1544 }
1545
1546
1547 /*
1548 * Max number of concurrent aio requests per process
1549 */
1550 STATIC int
1551 sysctl_aioprocmax
1552 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1553 {
1554 int new_value, changed;
1555 int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
1556 if (changed) {
1557 /* make sure per process limit is less than the system-wide limit */
1558 if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
1559 aio_max_requests_per_process = new_value;
1560 else
1561 error = EINVAL;
1562 }
1563 return(error);
1564 }
1565
1566
1567 /*
1568 * Max number of async IO worker threads
1569 */
1570 STATIC int
1571 sysctl_aiothreads
1572 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1573 {
1574 int new_value, changed;
1575 int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
1576 if (changed) {
1577 /* we only allow an increase in the number of worker threads */
1578 if (new_value > aio_worker_threads ) {
1579 _aio_create_worker_threads((new_value - aio_worker_threads));
1580 aio_worker_threads = new_value;
1581 }
1582 else
1583 error = EINVAL;
1584 }
1585 return(error);
1586 }
1587
1588
1589 /*
1590 * System-wide limit on the max number of processes
1591 */
1592 STATIC int
1593 sysctl_maxproc
1594 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1595 {
1596 int new_value, changed;
1597 int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
1598 if (changed) {
1599 AUDIT_ARG(value32, new_value);
1600 /* make sure the system-wide limit is less than the configured hard
1601 limit set at kernel compilation */
1602 if (new_value <= hard_maxproc && new_value > 0)
1603 maxproc = new_value;
1604 else
1605 error = EINVAL;
1606 }
1607 return(error);
1608 }
1609
/* Read-only kernel identification strings and revision numbers. */
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	(int *)NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	version, 0, "");
SYSCTL_STRING(_kern, OID_AUTO, uuid,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&kernel_uuid_string[0], 0, "");
#if DEBUG
#ifndef DKPR
#define DKPR 1	/* kprintf syscall tracing support is always built on DEBUG kernels */
#endif
#endif
1630
#if DKPR
/* global enable bits and optional per-process-name filter for kprintf
 * syscall tracing (debug.kprint_syscall*) */
int debug_kprint_syscall = 0;
char debug_kprint_syscall_process[MAXCOMLEN+1];

/* Thread safe: bits and string value are not used to reclaim state */
SYSCTL_INT (_debug, OID_AUTO, kprint_syscall,
	CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
	CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
	"name of process for kprintf syscall tracing");
1641
1642 int debug_kprint_current_process(const char **namep)
1643 {
1644 struct proc *p = current_proc();
1645
1646 if (p == NULL) {
1647 return 0;
1648 }
1649
1650 if (debug_kprint_syscall_process[0]) {
1651 /* user asked to scope tracing to a particular process name */
1652 if(0 == strncmp(debug_kprint_syscall_process,
1653 p->p_comm, sizeof(debug_kprint_syscall_process))) {
1654 /* no value in telling the user that we traced what they asked */
1655 if(namep) *namep = NULL;
1656
1657 return 1;
1658 } else {
1659 return 0;
1660 }
1661 }
1662
1663 /* trace all processes. Tell user what we traced */
1664 if (namep) {
1665 *namep = p->p_comm;
1666 }
1667
1668 return 1;
1669 }
1670 #endif
1671
1672 /* PR-5293665: need to use a callback function for kern.osversion to set
1673 * osversion in IORegistry */
1674
1675 STATIC int
1676 sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1677 {
1678 int rval = 0;
1679
1680 rval = sysctl_handle_string(oidp, arg1, arg2, req);
1681
1682 if (req->newptr) {
1683 IORegistrySetOSBuildVersion((char *)arg1);
1684 }
1685
1686 return rval;
1687 }
1688
SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
	CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
	osversion, 256 /* OSVERSIZE*/,
	sysctl_osversion, "A", "");

/* cached OS variant flags; writable once by launchd (see handler below) */
static uint64_t osvariant_status = 0;
1695
1696 STATIC int
1697 sysctl_osvariant_status(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1698 {
1699 if (req->newptr != 0) {
1700 /*
1701 * Can only ever be set by launchd, and only once at boot.
1702 */
1703 if (req->p->p_pid != 1 || osvariant_status != 0) {
1704 return EPERM;
1705 }
1706 }
1707
1708 return sysctl_handle_quad(oidp, arg1, arg2, req);
1709 }
1710
SYSCTL_PROC(_kern, OID_AUTO, osvariant_status,
	CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
	&osvariant_status, sizeof(osvariant_status),
	sysctl_osvariant_status, "Q", "Opaque flags used to cache OS variant information");
1715
/*
 * kern.bootargs: read-only copy of the boot-args string obtained from the
 * platform expert.
 *
 * NOTE(review): buf is a variable-length array sized from a runtime local
 * (256 embedded / 1024 otherwise); presumably these match BOOT_LINE_LENGTH
 * per the comment — confirm against pexpert before changing.
 */
STATIC int
sysctl_sysctl_bootargs
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	/* BOOT_LINE_LENGTH */
#if CONFIG_EMBEDDED
	size_t boot_args_len = 256;
#else
	size_t boot_args_len = 1024;
#endif
	char buf[boot_args_len];

	strlcpy(buf, PE_boot_args(), boot_args_len);
	error = sysctl_io_string(req, buf, boot_args_len, 0, NULL);
	return(error);
}

SYSCTL_PROC(_kern, OID_AUTO, bootargs,
	CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
	NULL, 0,
	sysctl_sysctl_bootargs, "A", "bootargs");
1738
/* Simple integer limits and POSIX feature constants. */
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	(int *)NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	(int *)NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	(int *)NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	(int *)NULL, 1, "");
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	(int *)NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	NULL, 0, "");
#endif
/* Live counters for open files, vnodes, tasks and threads. */
SYSCTL_INT(_kern, OID_AUTO, num_files,
	CTLFLAG_RD | CTLFLAG_LOCKED,
	&nfiles, 0, "");
SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
	CTLFLAG_RD | CTLFLAG_LOCKED,
	&numvnodes, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_tasks,
	CTLFLAG_RD | CTLFLAG_LOCKED,
	&task_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_threads,
	CTLFLAG_RD | CTLFLAG_LOCKED,
	&thread_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
	CTLFLAG_RD | CTLFLAG_LOCKED,
	&task_threadmax, 0, "");
1778
1779 STATIC int
1780 sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1781 {
1782 int oldval = desiredvnodes;
1783 int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
1784
1785 if (oldval != desiredvnodes) {
1786 resize_namecache(desiredvnodes);
1787 }
1788
1789 return(error);
1790 }
1791
SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
	CTLFLAG_RW | CTLFLAG_LOCKED,
	&nc_disabled, 0, "");

/* Registrations for the validated read-write limit handlers above. */
SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0, sysctl_maxvnodes, "I", "");

SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0, sysctl_aiothreads, "I", "");
1815
#if (DEVELOPMENT || DEBUG)
/* Scheduler SMT balancing knob (development/debug kernels only). */
extern int sched_smt_balance;
SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance,
	CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
	&sched_smt_balance, 0, "");
#if __arm__ || __arm64__
/* Core recommendation bitmask last requested by CLPC. */
extern uint32_t perfcontrol_requested_recommended_cores;
SYSCTL_UINT(_kern, OID_AUTO, sched_recommended_cores,
	CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
	&perfcontrol_requested_recommended_cores, 0, "");

/* Scheduler perfcontrol callouts sysctls */
SYSCTL_DECL(_kern_perfcontrol_callout);
SYSCTL_NODE(_kern, OID_AUTO, perfcontrol_callout, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
	"scheduler perfcontrol callouts");

extern int perfcontrol_callout_stats_enabled;
SYSCTL_INT(_kern_perfcontrol_callout, OID_AUTO, stats_enabled,
	CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
	&perfcontrol_callout_stats_enabled, 0, "");

extern uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
	perfcontrol_callout_stat_t stat);
1840 /* On-Core Callout */
1841 STATIC int
1842 sysctl_perfcontrol_callout_stat
1843 (__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1844 {
1845 perfcontrol_callout_stat_t stat = (perfcontrol_callout_stat_t)arg1;
1846 perfcontrol_callout_type_t type = (perfcontrol_callout_type_t)arg2;
1847 return sysctl_io_number(req, (int)perfcontrol_callout_stat_avg(type, stat),
1848 sizeof(int), NULL, NULL);
1849 }
1850
/*
 * One read-only node per (callout type, statistic) pair; arg1/arg2 encode
 * the selection handled by sysctl_perfcontrol_callout_stat above.
 */
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_instr,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_ON_CORE,
	sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_cycles,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_ON_CORE,
	sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_instr,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_OFF_CORE,
	sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_cycles,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_OFF_CORE,
	sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_instr,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_CONTEXT,
	sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_cycles,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_CONTEXT,
	sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_instr,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_STATE_UPDATE,
	sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_cycles,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_STATE_UPDATE,
	sysctl_perfcontrol_callout_stat, "I", "");

#endif /* __arm__ || __arm64__ */
#endif /* (DEVELOPMENT || DEBUG) */
1886
1887 STATIC int
1888 sysctl_securelvl
1889 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1890 {
1891 int new_value, changed;
1892 int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
1893 if (changed) {
1894 if (!(new_value < securelevel && req->p->p_pid != 1)) {
1895 proc_list_lock();
1896 securelevel = new_value;
1897 proc_list_unlock();
1898 } else {
1899 error = EPERM;
1900 }
1901 }
1902 return(error);
1903 }
1904
1905 SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
1906 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1907 0, 0, sysctl_securelvl, "I", "");
1908
1909
1910 STATIC int
1911 sysctl_domainname
1912 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1913 {
1914 int error, changed;
1915 error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
1916 if (changed) {
1917 domainnamelen = strlen(domainname);
1918 }
1919 return(error);
1920 }
1921
1922 SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
1923 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
1924 0, 0, sysctl_domainname, "A", "");
1925
/* kern.hostid: legacy host identifier (compat-width int interface). */
SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &hostid, 0, "");
1929
1930 STATIC int
1931 sysctl_hostname
1932 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1933 {
1934 int error, changed;
1935 error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
1936 if (changed) {
1937 hostnamelen = req->newlen;
1938 }
1939 return(error);
1940 }
1941
1942
1943 SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
1944 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
1945 0, 0, sysctl_hostname, "A", "");
1946
/*
 * sysctl kern.procname: read (or, historically, write) the calling
 * process's p_name buffer.  CTLFLAG_ANYBODY: no privilege required,
 * since it only touches the caller's own proc.
 */
STATIC int
sysctl_procname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	/* Original code allowed writing, I'm copying this, although this all makes
	   no sense to me. Besides, this sysctl is never used. */
	return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
}

SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_procname, "A", "");
1959
/* kern.speculative_reads_disabled: global gate for speculative file reads. */
SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_reads_disabled, 0, "");

/* Cluster-read preheat window bounds, in bytes. */
SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_max_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_min_bytes, 0, "");

/* Caps on speculative prefetching. */
SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max_iosize, 0, "");

/* VM free-queue / speculative-queue tunables exported from the VM layer. */
SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_target, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_reserved, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_speculative_percentage, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_speculative_q_age_ms, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_delayed_work_limit, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_batch, 0, "");

/* kern.bootsessionuuid: boot session UUID string (read-only). */
SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &bootsessionuuid_string, sizeof(bootsessionuuid_string) , "");
2011
2012 STATIC int
2013 sysctl_boottime
2014 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2015 {
2016 struct timeval tv;
2017 boottime_timeval(&tv);
2018 struct proc *p = req->p;
2019
2020 if (proc_is64bit(p)) {
2021 struct user64_timeval t = {};
2022 t.tv_sec = tv.tv_sec;
2023 t.tv_usec = tv.tv_usec;
2024 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2025 } else {
2026 struct user32_timeval t = {};
2027 t.tv_sec = tv.tv_sec;
2028 t.tv_usec = tv.tv_usec;
2029 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2030 }
2031 }
2032
2033 SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
2034 CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
2035 0, 0, sysctl_boottime, "S,timeval", "");
2036
2037 STATIC int
2038 sysctl_symfile
2039 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2040 {
2041 char *str;
2042 int error = get_kernel_symfile(req->p, &str);
2043 if (error)
2044 return (error);
2045 return sysctl_io_string(req, str, 0, 0, NULL);
2046 }
2047
2048
2049 SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
2050 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
2051 0, 0, sysctl_symfile, "A", "");
2052
#if NFSCLIENT
/*
 * sysctl kern.netboot: nonzero when the system was booted from the
 * network (value comes from netboot_root()).  Only built when the NFS
 * client is configured.
 */
STATIC int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_netboot, "I", "");
#endif
2065
2066 #ifdef CONFIG_IMGSRC_ACCESS
2067 /*
2068 * Legacy--act as if only one layer of nesting is possible.
2069 */
/*
 * sysctl kern.imgsrcdev: report the dev_t of the device backing the
 * first (outermost) imageboot source volume.  Root-only; ENOENT when
 * there is no imageboot root vnode.
 */
STATIC int
sysctl_imgsrcdev
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	vfs_context_t ctx = vfs_context_current();
	vnode_t devvp;
	int result;

	if (!vfs_context_issuser(ctx)) {
		return EPERM;
	}

	if (imgsrc_rootvnodes[0] == NULL) {
		return ENOENT;
	}

	/* Take an iocount on the root vnode so its mount stays valid below. */
	result = vnode_getwithref(imgsrc_rootvnodes[0]);
	if (result != 0) {
		return result;
	}

	/* Also hold the backing device vnode while we read its rdev. */
	devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
	result = vnode_getwithref(devvp);
	if (result != 0) {
		goto out;
	}

	result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);

	vnode_put(devvp);
out:
	vnode_put(imgsrc_rootvnodes[0]);
	return result;
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcdev, "I", "");
2108
/*
 * sysctl kern.imgsrcinfo: emit an imgsrc_info record for each nested
 * imageboot source volume (up to MAX_IMAGEBOOT_NESTING).  Only the
 * entries actually populated are copied out.  ENXIO when not
 * imagebooted.
 */
STATIC int
sysctl_imgsrcinfo
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct imgsrc_info info[MAX_IMAGEBOOT_NESTING] = {};	/* 2 for now, no problem */
	uint32_t i;
	vnode_t rvp, devvp;

	if (imgsrc_rootvnodes[0] == NULLVP) {
		return ENXIO;
	}

	for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
		/*
		 * Go get the root vnode.
		 */
		rvp = imgsrc_rootvnodes[i];
		if (rvp == NULLVP) {
			break;
		}

		/* Iocount on the root vnode keeps its mount valid below. */
		error = vnode_get(rvp);
		if (error != 0) {
			return error;
		}

		/*
		 * For now, no getting at a non-local volume.
		 */
		devvp = vnode_mount(rvp)->mnt_devvp;
		if (devvp == NULL) {
			vnode_put(rvp);
			return EINVAL;
		}

		error = vnode_getwithref(devvp);
		if (error != 0) {
			vnode_put(rvp);
			return error;
		}

		/*
		 * Fill in info.
		 */
		info[i].ii_dev = vnode_specrdev(devvp);
		info[i].ii_flags = 0;
		info[i].ii_height = i;
		bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));

		/* Release in reverse acquisition order. */
		vnode_put(devvp);
		vnode_put(rvp);
	}

	/* i == number of populated entries when the loop exits. */
	return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcinfo, "I", "");
2169
2170 #endif /* CONFIG_IMGSRC_ACCESS */
2171
2172
/* kern.timer node and its children. */
SYSCTL_DECL(_kern_timer);
SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");


/* kern.timer.coalescing_enabled: master switch for Mach timer coalescing. */
SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mach_timer_coalescing_enabled, 0, "");

SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &timer_deadline_tracking_bin_1, "");
SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &timer_deadline_tracking_bin_2, "");

/* kern.timer.longterm node: long-term timer queue controls/statistics. */
SYSCTL_DECL(_kern_timer_longterm);
SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");
2190
2191
/* Must match definition in osfmk/kern/timer_call.c */
/* Selector values passed (via arg1) to sysctl_timer below. */
enum {
	THRESHOLD, QCOUNT,
	ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
	LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, PAUSES
};
/* Accessors implemented in osfmk/kern/timer_call.c (per comment above). */
extern uint64_t timer_sysctl_get(int);
extern int timer_sysctl_set(int, uint64_t);
2200
2201 STATIC int
2202 sysctl_timer
2203 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2204 {
2205 int oid = (int)arg1;
2206 uint64_t value = timer_sysctl_get(oid);
2207 uint64_t new_value;
2208 int error;
2209 int changed;
2210
2211 error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
2212 if (changed)
2213 error = timer_sysctl_set(oid, new_value);
2214
2215 return error;
2216 }
2217
/* RW controls for the long-term timer queue. */
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    (void *) THRESHOLD, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_limit,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    (void *) SCAN_LIMIT, 0, sysctl_timer, "Q", "");
/* Read-only queue length. */
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) QCOUNT, 0, sysctl_timer, "Q", "");
#if DEBUG
/* Additional read-only statistics, DEBUG kernels only. */
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) ENQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) DEQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) ESCALATES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) SCANS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) PREEMPTS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_pauses,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    (void *) PAUSES, 0, sysctl_timer, "Q", "");
#endif /* DEBUG */
2256
/*
 * sysctl kern.usrstack: the calling process's user stack address,
 * deliberately truncated to 32 bits for this legacy interface (see
 * usrstack64 below for the full-width value).
 */
STATIC int
sysctl_usrstack
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack, "I", "");
2267
/*
 * sysctl kern.usrstack64: the calling process's user stack address at
 * its native width.
 */
STATIC int
sysctl_usrstack64
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack64, "Q", "");
2278
2279 #if CONFIG_COREDUMP
2280
/* kern.corefile: template path used when writing core dumps. */
SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    corefilename, sizeof(corefilename), "");
2284
2285 STATIC int
2286 sysctl_coredump
2287 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2288 {
2289 #ifdef SECURE_KERNEL
2290 (void)req;
2291 return (ENOTSUP);
2292 #else
2293 int new_value, changed;
2294 int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
2295 if (changed) {
2296 if ((new_value == 0) || (new_value == 1))
2297 do_coredump = new_value;
2298 else
2299 error = EINVAL;
2300 }
2301 return(error);
2302 #endif
2303 }
2304
2305 SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
2306 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2307 0, 0, sysctl_coredump, "I", "");
2308
2309 STATIC int
2310 sysctl_suid_coredump
2311 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2312 {
2313 #ifdef SECURE_KERNEL
2314 (void)req;
2315 return (ENOTSUP);
2316 #else
2317 int new_value, changed;
2318 int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
2319 if (changed) {
2320 if ((new_value == 0) || (new_value == 1))
2321 sugid_coredump = new_value;
2322 else
2323 error = EINVAL;
2324 }
2325 return(error);
2326 #endif
2327 }
2328
2329 SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
2330 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2331 0, 0, sysctl_suid_coredump, "I", "");
2332
2333 #endif /* CONFIG_COREDUMP */
2334
2335 STATIC int
2336 sysctl_delayterm
2337 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2338 {
2339 struct proc *p = req->p;
2340 int new_value, changed;
2341 int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
2342 if (changed) {
2343 proc_lock(p);
2344 if (new_value)
2345 req->p->p_lflag |= P_LDELAYTERM;
2346 else
2347 req->p->p_lflag &= ~P_LDELAYTERM;
2348 proc_unlock(p);
2349 }
2350 return(error);
2351 }
2352
2353 SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
2354 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2355 0, 0, sysctl_delayterm, "I", "");
2356
2357
2358 STATIC int
2359 sysctl_rage_vnode
2360 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2361 {
2362 struct proc *p = req->p;
2363 struct uthread *ut;
2364 int new_value, old_value, changed;
2365 int error;
2366
2367 ut = get_bsdthread_info(current_thread());
2368
2369 if (ut->uu_flag & UT_RAGE_VNODES)
2370 old_value = KERN_RAGE_THREAD;
2371 else if (p->p_lflag & P_LRAGE_VNODES)
2372 old_value = KERN_RAGE_PROC;
2373 else
2374 old_value = 0;
2375
2376 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2377
2378 if (error == 0) {
2379 switch (new_value) {
2380 case KERN_RAGE_PROC:
2381 proc_lock(p);
2382 p->p_lflag |= P_LRAGE_VNODES;
2383 proc_unlock(p);
2384 break;
2385 case KERN_UNRAGE_PROC:
2386 proc_lock(p);
2387 p->p_lflag &= ~P_LRAGE_VNODES;
2388 proc_unlock(p);
2389 break;
2390
2391 case KERN_RAGE_THREAD:
2392 ut->uu_flag |= UT_RAGE_VNODES;
2393 break;
2394 case KERN_UNRAGE_THREAD:
2395 ut = get_bsdthread_info(current_thread());
2396 ut->uu_flag &= ~UT_RAGE_VNODES;
2397 break;
2398 }
2399 }
2400 return(error);
2401 }
2402
2403 SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
2404 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2405 0, 0, sysctl_rage_vnode, "I", "");
2406
/* XXX move this interface into libproc and remove this sysctl */
/*
 * kern.setthread_cpupercent: write-only control applying a CPU usage
 * limit to the calling thread.  The written int packs the percentage
 * in the low 8 bits and the refill period (milliseconds) in the upper
 * 24 bits.  A percentage of 0 clears any existing limit.
 */
STATIC int
sysctl_setthread_cpupercent
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, old_value;
	int error = 0;
	kern_return_t kret = KERN_SUCCESS;
	uint8_t percent = 0;
	int ms_refill = 0;

	/* Write-only: a pure read is a no-op success. */
	if (!req->newptr)
		return (0);

	old_value = 0;

	if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0)
		return (error);

	percent = new_value & 0xff;			/* low 8 bits hold the percentage */
	ms_refill = (new_value >> 8) & 0xffffff;	/* upper 24 bits hold the ms refill value */
	if (percent > 100)
		return (EINVAL);

	/*
	 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
	 */
	if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0)
		return (EIO);

	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
    0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
2443
2444
2445 STATIC int
2446 sysctl_kern_check_openevt
2447 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2448 {
2449 struct proc *p = req->p;
2450 int new_value, old_value, changed;
2451 int error;
2452
2453 if (p->p_flag & P_CHECKOPENEVT) {
2454 old_value = KERN_OPENEVT_PROC;
2455 } else {
2456 old_value = 0;
2457 }
2458
2459 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2460
2461 if (error == 0) {
2462 switch (new_value) {
2463 case KERN_OPENEVT_PROC:
2464 OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
2465 break;
2466
2467 case KERN_UNOPENEVT_PROC:
2468 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
2469 break;
2470
2471 default:
2472 error = EINVAL;
2473 }
2474 }
2475 return(error);
2476 }
2477
2478 SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2479 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2480
2481
2482
/*
 * sysctl kern.nx: get/set whether NX (no-execute) page protection is
 * enabled.  Unsupported on SECURE_KERNEL builds; on x86 the value may
 * only be changed when the CPU advertises the XD feature.
 */
STATIC int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	(void)req;
	return ENOTSUP;
#else
	int new_value, changed;
	int error;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
	if (error)
		return error;

	if (changed) {
#if defined(__i386__) || defined(__x86_64__)
		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
			return ENOTSUP;
#endif
		nx_enabled = new_value;
	}
	/* error is 0 here (checked above). */
	return(error);
#endif /* SECURE_KERNEL */
}



SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    0, 0, sysctl_nx, "I", "");
2517
2518 STATIC int
2519 sysctl_loadavg
2520 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2521 {
2522 if (proc_is64bit(req->p)) {
2523 struct user64_loadavg loadinfo64 = {};
2524 fill_loadavg64(&averunnable, &loadinfo64);
2525 return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
2526 } else {
2527 struct user32_loadavg loadinfo32 = {};
2528 fill_loadavg32(&averunnable, &loadinfo32);
2529 return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
2530 }
2531 }
2532
2533 SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
2534 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2535 0, 0, sysctl_loadavg, "S,loadavg", "");
2536
2537 /*
2538 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2539 */
2540 STATIC int
2541 sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
2542 __unused int arg2, struct sysctl_req *req)
2543 {
2544 int old_value=0, new_value=0, error=0;
2545
2546 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value ))
2547 return(error);
2548 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
2549 if (!error) {
2550 return (vm_toggle_entry_reuse(new_value, NULL));
2551 }
2552 return(error);
2553 }
2554
2555 SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse,"I","");
2556
2557
2558 STATIC int
2559 sysctl_swapusage
2560 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2561 {
2562 int error;
2563 uint64_t swap_total;
2564 uint64_t swap_avail;
2565 vm_size_t swap_pagesize;
2566 boolean_t swap_encrypted;
2567 struct xsw_usage xsu = {};
2568
2569 error = macx_swapinfo(&swap_total,
2570 &swap_avail,
2571 &swap_pagesize,
2572 &swap_encrypted);
2573 if (error)
2574 return error;
2575
2576 xsu.xsu_total = swap_total;
2577 xsu.xsu_avail = swap_avail;
2578 xsu.xsu_used = swap_total - swap_avail;
2579 xsu.xsu_pagesize = swap_pagesize;
2580 xsu.xsu_encrypted = swap_encrypted;
2581 return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
2582 }
2583
2584
2585
2586 SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
2587 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2588 0, 0, sysctl_swapusage, "S,xsw_usage", "");
2589
#if CONFIG_FREEZE
extern void vm_page_reactivate_all_throttled(void);

/*
 * sysctl vm.freeze_enabled: get/set whether memorystatus freezing is
 * enabled.  Rejected (EINVAL) while the VM compressor is active; when
 * freezing is being turned off, previously-throttled dirty pages are
 * moved back to the active queue.
 */
static int
sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = memorystatus_freeze_enabled ? 1 : 0;
	boolean_t disabled;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		//assert(req->newptr);
		printf("Failed attempt to set vm.freeze_enabled sysctl\n");
		return EINVAL;
	}

	/*
	 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
	 */
	disabled = (!val && memorystatus_freeze_enabled);

	memorystatus_freeze_enabled = val ? TRUE : FALSE;

	if (disabled) {
		vm_page_reactivate_all_throttled();
	}

	return (0);
}

SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
#endif /* CONFIG_FREEZE */
2626
/* this kernel does NOT implement shared_region_make_private_np() */
/* kern.shreg_private: always reads as unavailable (NULL backing). */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    (int *)NULL, 0, "");
2631
2632 STATIC int
2633 fetch_process_cputype(
2634 proc_t cur_proc,
2635 int *name,
2636 u_int namelen,
2637 cpu_type_t *cputype)
2638 {
2639 proc_t p = PROC_NULL;
2640 int refheld = 0;
2641 cpu_type_t ret = 0;
2642 int error = 0;
2643
2644 if (namelen == 0)
2645 p = cur_proc;
2646 else if (namelen == 1) {
2647 p = proc_find(name[0]);
2648 if (p == NULL)
2649 return (EINVAL);
2650 refheld = 1;
2651 } else {
2652 error = EINVAL;
2653 goto out;
2654 }
2655
2656 ret = cpu_type() & ~CPU_ARCH_MASK;
2657 if (IS_64BIT_PROCESS(p))
2658 ret |= CPU_ARCH_ABI64;
2659
2660 *cputype = ret;
2661
2662 if (refheld != 0)
2663 proc_rele(p);
2664 out:
2665 return (error);
2666 }
2667
2668 STATIC int
2669 sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2670 struct sysctl_req *req)
2671 {
2672 int error;
2673 cpu_type_t proc_cputype = 0;
2674 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2675 return error;
2676 int res = 1;
2677 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
2678 res = 0;
2679 return SYSCTL_OUT(req, &res, sizeof(res));
2680 }
2681 SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native ,"I","proc_native");
2682
2683 STATIC int
2684 sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2685 struct sysctl_req *req)
2686 {
2687 int error;
2688 cpu_type_t proc_cputype = 0;
2689 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2690 return error;
2691 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
2692 }
2693 SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
2694
2695 STATIC int
2696 sysctl_safeboot
2697 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2698 {
2699 return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
2700 }
2701
2702 SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
2703 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2704 0, 0, sysctl_safeboot, "I", "");
2705
2706 STATIC int
2707 sysctl_singleuser
2708 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2709 {
2710 return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
2711 }
2712
2713 SYSCTL_PROC(_kern, OID_AUTO, singleuser,
2714 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2715 0, 0, sysctl_singleuser, "I", "");
2716
2717 STATIC int sysctl_minimalboot
2718 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2719 {
2720 return sysctl_io_number(req, minimalboot, sizeof(int), NULL, NULL);
2721 }
2722
2723 SYSCTL_PROC(_kern, OID_AUTO, minimalboot,
2724 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2725 0, 0, sysctl_minimalboot, "I", "");
2726
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t affinity_sets_enabled;
extern int affinity_sets_mapping;

SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
    CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
    CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
2737
2738 /*
2739 * Boolean indicating if KASLR is active.
2740 */
2741 STATIC int
2742 sysctl_slide
2743 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2744 {
2745 uint32_t slide;
2746
2747 slide = vm_kernel_slide ? 1 : 0;
2748
2749 return sysctl_io_number( req, slide, sizeof(int), NULL, NULL);
2750 }
2751
2752 SYSCTL_PROC(_kern, OID_AUTO, slide,
2753 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2754 0, 0, sysctl_slide, "I", "");
2755
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory.  This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
 * kmem_init().
 *
 * All values are in bytes.
 */

vm_map_size_t	vm_global_no_user_wire_amount;
vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

/*
 * There needs to be a more automatic/elegant way to do this
 */
/* 32-bit ARM exposes these as ints; everything else as 64-bit quads. */
#if defined(__ARM__)
SYSCTL_INT(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, 0, "");
SYSCTL_INT(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, 0, "");
SYSCTL_INT(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, 0, "");
#else
SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
#endif
2785
/* Read-only counters from vm_map_copy_overwrite_aligned(). */
extern int vm_map_copy_overwrite_aligned_src_not_internal;
extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
extern int vm_map_copy_overwrite_aligned_src_large;
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");


/* File-backed (external) page accounting. */
extern uint32_t	vm_page_external_count;
extern uint32_t	vm_page_filecache_min;

SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_filecache_min, 0, "");
2799
/* VM compressor state and tuning knobs (defined in the VM layer). */
extern int vm_compressor_mode;
extern int vm_compressor_is_active;
extern int vm_compressor_available;
extern uint32_t vm_ripe_target_age;
extern uint32_t swapout_target_age;
extern int64_t  compressor_bytes_used;
extern int64_t  c_segment_input_bytes;
extern int64_t  c_segment_compressed_bytes;
extern uint32_t compressor_eval_period_in_msecs;
extern uint32_t compressor_sample_min_in_msecs;
extern uint32_t compressor_sample_max_in_msecs;
extern uint32_t compressor_thrashing_threshold_per_10msecs;
extern uint32_t compressor_thrashing_min_per_10msecs;
extern uint32_t vm_compressor_minorcompact_threshold_divisor;
extern uint32_t vm_compressor_majorcompact_threshold_divisor;
extern uint32_t vm_compressor_unthrottle_threshold_divisor;
extern uint32_t vm_compressor_catchup_threshold_divisor;
extern uint32_t vm_compressor_time_thread;
#if DEVELOPMENT || DEBUG
extern vmct_stats_t vmct_stats;
#endif

/* Read-only compressor byte counters. */
SYSCTL_QUAD(_vm, OID_AUTO, compressor_input_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_input_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");

/* Read-only compressor status. */
SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_available, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_available, 0, "");

/* Read-write compressor tuning. */
SYSCTL_INT(_vm, OID_AUTO, vm_ripe_target_age_in_secs, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ripe_target_age, 0, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_minorcompact_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_majorcompact_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_unthrottle_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_catchup_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_catchup_threshold_divisor, 0, "");

/* vm.swapfileprefix: writable prefix of the swap file path (suffix index reserved). */
SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_timing_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_time_thread, 0, "");
2846
#if DEVELOPMENT || DEBUG
/* Per-compressor-thread statistics (two threads), DEVELOPMENT/DEBUG only. */
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[1], "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_threads_total, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_cthreads_total, "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[1], "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[1], "");

SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[0], 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[1], 0, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[0], 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[1], 0, "");

#endif
2866
/* LZ4 codec statistics (counts, byte totals, and delta vs. WKdm). */
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_delta, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_negative_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_negative_delta, "");

SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressed_bytes, "");

SYSCTL_QUAD(_vm, OID_AUTO, uc_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.uc_decompressions, "");

/* WKdm codec statistics; the *catime/*datime OIDs expose absolute-time
 * (mach abstime) totals, per the *_cabstime/*_dabstime backing fields. */
SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_cabstime, "");

SYSCTL_QUAD(_vm, OID_AUTO, wkh_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_cabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wkh_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_cabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions_exclusive, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_mzv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_mzv_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_exclusive, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_total, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_total, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_sv_compressions, "");


SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_dabstime, "");

SYSCTL_QUAD(_vm, OID_AUTO, wkh_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_dabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wkh_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_dabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_decompressions, "");

/* Writable codec-selection heuristics (vmctune). */
SYSCTL_INT(_vm, OID_AUTO, lz4_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, wkdm_reeval_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.wkdm_reeval_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_skips, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_skips, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_run_length, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_run_length, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_preselects, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_preselects, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_run_preselection_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_preselection_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_run_continue_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_continue_bytes, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_profitable_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_profitable_bytes, 0, "");
#if DEVELOPMENT || DEBUG
/* Debug-only codec overrides: force a specific codec, poke write
 * protection, or force the software WKdm path. */
extern int vm_compressor_current_codec;
extern int vm_compressor_test_seg_wp;
extern boolean_t vm_compressor_force_sw_wkdm;
SYSCTL_INT(_vm, OID_AUTO, compressor_codec, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_current_codec, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_test_wp, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_test_seg_wp, 0, "");

SYSCTL_INT(_vm, OID_AUTO, wksw_force, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_force_sw_wkdm, 0, "");
extern int precompy, wkswhw;

SYSCTL_INT(_vm, OID_AUTO, precompy, CTLFLAG_RW | CTLFLAG_LOCKED, &precompy, 0, "");
SYSCTL_INT(_vm, OID_AUTO, wkswhw, CTLFLAG_RW | CTLFLAG_LOCKED, &wkswhw, 0, "");
extern unsigned int vm_ktrace_enabled;
SYSCTL_INT(_vm, OID_AUTO, vm_ktrace, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ktrace_enabled, 0, "");
#endif
2936
#if CONFIG_PHANTOM_CACHE
/* Phantom-cache thrashing detection tunables. */
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
extern uint32_t phantom_cache_thrashing_threshold_ssd;


SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
#endif
2947
#if CONFIG_BACKGROUND_QUEUE
/* Background page queue: mode/target are writable, counters read-only. */

extern uint32_t vm_page_background_count;
extern uint32_t vm_page_background_target;
extern uint32_t vm_page_background_internal_count;
extern uint32_t vm_page_background_external_count;
extern uint32_t vm_page_background_mode;
extern uint32_t vm_page_background_exclude_external;
extern uint64_t vm_page_background_promoted_count;
extern uint64_t vm_pageout_considered_bq_internal;
extern uint64_t vm_pageout_considered_bq_external;
extern uint64_t vm_pageout_rejected_bq_internal;
extern uint64_t vm_pageout_rejected_bq_external;

SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");

SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_external, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");

#endif
2976
#if (DEVELOPMENT || DEBUG)

/* Page-creation throttling counters (read-only). */
SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
	    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	    &vm_page_creation_throttled_hard, 0, "");

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
	    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	    &vm_page_creation_throttled_soft, 0, "");

/* memorystatus feedback factors for the pageout scan. */
extern uint32_t vm_pageout_memorystatus_fb_factor_nr;
extern uint32_t vm_pageout_memorystatus_fb_factor_dr;
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_nr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_nr, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_dr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_dr, 0, "");

extern uint32_t vm_grab_anon_overrides;
extern uint32_t vm_grab_anon_nops;

SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_overrides, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_nops, 0, "");

/* log message counters for persistence mode */
extern uint32_t oslog_p_total_msgcount;
extern uint32_t oslog_p_metadata_saved_msgcount;
extern uint32_t oslog_p_metadata_dropped_msgcount;
extern uint32_t oslog_p_error_count;
extern uint32_t oslog_p_saved_msgcount;
extern uint32_t oslog_p_dropped_msgcount;
extern uint32_t oslog_p_boot_dropped_msgcount;

/* log message counters for streaming mode */
extern uint32_t oslog_s_total_msgcount;
extern uint32_t oslog_s_metadata_msgcount;
extern uint32_t oslog_s_error_count;
extern uint32_t oslog_s_streamed_msgcount;
extern uint32_t oslog_s_dropped_msgcount;

/* CTLFLAG_ANYBODY: these counters are readable without privilege. */
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_boot_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_boot_dropped_msgcount, 0, "");

SYSCTL_UINT(_debug, OID_AUTO, oslog_s_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_metadata_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_metadata_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_streamed_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_streamed_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_dropped_msgcount, 0, "");


#endif /* DEVELOPMENT || DEBUG */
3030
3031 /*
3032 * Enable tracing of voucher contents
3033 */
3034 extern uint32_t ipc_voucher_trace_contents;
3035
3036 SYSCTL_INT (_kern, OID_AUTO, ipc_voucher_trace_contents,
3037 CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");
3038
3039 /*
3040 * Kernel stack size and depth
3041 */
3042 SYSCTL_INT (_kern, OID_AUTO, stack_size,
3043 CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
3044 SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
3045 CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
3046
3047 extern unsigned int kern_feature_overrides;
3048 SYSCTL_INT (_kern, OID_AUTO, kern_feature_overrides,
3049 CTLFLAG_RD | CTLFLAG_LOCKED, &kern_feature_overrides, 0, "Kernel feature override mask");
3050
3051 /*
3052 * enable back trace for port allocations
3053 */
3054 extern int ipc_portbt;
3055
3056 SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
3057 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
3058 &ipc_portbt, 0, "");
3059
3060 /*
3061 * Scheduler sysctls
3062 */
3063
3064 SYSCTL_STRING(_kern, OID_AUTO, sched,
3065 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
3066 sched_string, sizeof(sched_string),
3067 "Timeshare scheduler implementation");
3068
3069 /*
3070 * Only support runtime modification on embedded platforms
3071 * with development config enabled
3072 */
3073 #if CONFIG_EMBEDDED
3074 #if !SECURE_KERNEL
3075 extern int precise_user_kernel_time;
3076 SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time,
3077 CTLFLAG_RW | CTLFLAG_LOCKED,
3078 &precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time");
3079 #endif
3080 #endif
3081
3082
/* Parameters related to timer coalescing tuning, to be replaced
 * with a dedicated system call in the future.
 *
 * Covers coalescing tuning parameters for various thread/task
 * attributes, and enabling the processing of pending timers in the
 * context of any other interrupt.
 */
3088 STATIC int
3089 sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
3090 {
3091 #pragma unused(oidp)
3092 int size = arg2; /* subcommand*/
3093 int error;
3094 int changed = 0;
3095 uint64_t old_value_ns;
3096 uint64_t new_value_ns;
3097 uint64_t value_abstime;
3098 if (size == sizeof(uint32_t))
3099 value_abstime = *((uint32_t *)arg1);
3100 else if (size == sizeof(uint64_t))
3101 value_abstime = *((uint64_t *)arg1);
3102 else return ENOTSUP;
3103
3104 absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
3105 error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
3106 if ((error) || (!changed))
3107 return error;
3108
3109 nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
3110 if (size == sizeof(uint32_t))
3111 *((uint32_t *)arg1) = (uint32_t)value_abstime;
3112 else
3113 *((uint64_t *)arg1) = value_abstime;
3114 return error;
3115 }
3116
/* Timer-coalescing windows per thread/task class (bg/kt/fp/ts) and QoS
 * latency tier (0-5).  *_scale OIDs are plain shift values; *_ns_max OIDs
 * are stored as abstime and translated to/from nanoseconds by
 * sysctl_timer_user_us_kernel_abstime. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
	   CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	   &tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
	    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	    &tcoal_prio_params.timer_resort_threshold_abstime,
	    sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
	    sysctl_timer_user_us_kernel_abstime,
	    "Q", "");
SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
	    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	    &tcoal_prio_params.timer_coalesce_bg_abstime_max,
	    sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
	    sysctl_timer_user_us_kernel_abstime,
	    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
	   CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	   &tcoal_prio_params.timer_coalesce_kt_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
	    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	    &tcoal_prio_params.timer_coalesce_kt_abstime_max,
	    sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
	    sysctl_timer_user_us_kernel_abstime,
	    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
	   CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	   &tcoal_prio_params.timer_coalesce_fp_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
	    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	    &tcoal_prio_params.timer_coalesce_fp_abstime_max,
	    sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
	    sysctl_timer_user_us_kernel_abstime,
	    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
	   CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	   &tcoal_prio_params.timer_coalesce_ts_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
	    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	    &tcoal_prio_params.timer_coalesce_ts_abstime_max,
	    sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
	    sysctl_timer_user_us_kernel_abstime,
	    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
	   CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	   &tcoal_prio_params.latency_qos_scale[0], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
	    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	    &tcoal_prio_params.latency_qos_abstime_max[0],
	    sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
	    sysctl_timer_user_us_kernel_abstime,
	    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
	   CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	   &tcoal_prio_params.latency_qos_scale[1], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
	    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	    &tcoal_prio_params.latency_qos_abstime_max[1],
	    sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
	    sysctl_timer_user_us_kernel_abstime,
	    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
	   CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	   &tcoal_prio_params.latency_qos_scale[2], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
	    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	    &tcoal_prio_params.latency_qos_abstime_max[2],
	    sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
	    sysctl_timer_user_us_kernel_abstime,
	    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
	   CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	   &tcoal_prio_params.latency_qos_scale[3], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
	    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	    &tcoal_prio_params.latency_qos_abstime_max[3],
	    sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
	    sysctl_timer_user_us_kernel_abstime,
	    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
	   CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	   &tcoal_prio_params.latency_qos_scale[4], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
	    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	    &tcoal_prio_params.latency_qos_abstime_max[4],
	    sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
	    sysctl_timer_user_us_kernel_abstime,
	    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
	   CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	   &tcoal_prio_params.latency_qos_scale[5], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
	    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	    &tcoal_prio_params.latency_qos_abstime_max[5],
	    sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
	    sysctl_timer_user_us_kernel_abstime,
	    "Q", "");
3231
3232 /* Communicate the "user idle level" heuristic to the timer layer, and
3233 * potentially other layers in the future.
3234 */
3235
3236 static int
3237 timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
3238 int new_value = 0, old_value = 0, changed = 0, error;
3239
3240 old_value = timer_get_user_idle_level();
3241
3242 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
3243
3244 if (error == 0 && changed) {
3245 if (timer_set_user_idle_level(new_value) != KERN_SUCCESS)
3246 error = ERANGE;
3247 }
3248
3249 return error;
3250 }
3251
SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level,
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	    0, 0,
	    timer_user_idle_level, "I", "User idle level heuristic, 0-128");

#if HYPERVISOR
/* Read-only: whether Hypervisor.framework support is available. */
SYSCTL_INT(_kern, OID_AUTO, hv_support,
	   CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
	   &hv_support_available, 0, "");
#endif
3262
3263 #if CONFIG_EMBEDDED
/*
 * kern.darkboot handler.  Reads return the current darkboot flag; writes
 * (gated by the PRIV_DARKBOOT entitlement) set or clear the flag and may
 * persist it to NVRAM, keeping the in-memory flag and the NVRAM property
 * consistent with each other.
 */
STATIC int
sysctl_darkboot SYSCTL_HANDLER_ARGS
{
	int err = 0, value = 0;
#pragma unused(oidp, arg1, arg2, err, value, req)

	/*
	 * Handle the sysctl request.
	 *
	 * If this is a read, the function will set the value to the current darkboot value. Otherwise,
	 * we'll get the request identifier into "value" and then we can honor it.
	 */
	if ((err = sysctl_io_number(req, darkboot, sizeof(int), &value, NULL)) != 0) {
		goto exit;
	}

	/* writing requested, let's process the request */
	if (req->newptr) {
		/* writing is protected by an entitlement */
		if (priv_check_cred(kauth_cred_get(), PRIV_DARKBOOT, 0) != 0) {
			err = EPERM;
			goto exit;
		}

		switch (value) {
		case MEMORY_MAINTENANCE_DARK_BOOT_UNSET:
			/*
			 * If the darkboot sysctl is unset, the NVRAM variable
			 * must be unset too. If that's not the case, it means
			 * someone is doing something crazy and not supported.
			 */
			if (darkboot != 0) {
				/* NOTE(review): nonzero return from
				 * PERemoveNVRAMProperty is treated as success
				 * here -- confirm against its definition. */
				int ret = PERemoveNVRAMProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME);
				if (ret) {
					darkboot = 0;
				} else {
					err = EINVAL;
				}
			}
			break;
		case MEMORY_MAINTENANCE_DARK_BOOT_SET:
			/* In-memory flag only; not persisted across reboot. */
			darkboot = 1;
			break;
		case MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT: {
			/*
			 * Set the NVRAM and update 'darkboot' in case
			 * of success. Otherwise, do not update
			 * 'darkboot' and report the failure.
			 */
			if (PEWriteNVRAMBooleanProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME, TRUE)) {
				darkboot = 1;
			} else {
				err = EINVAL;
			}

			break;
		}
		default:
			/* Unknown request identifier. */
			err = EINVAL;
		}
	}

exit:
	return err;
}
3329
/* kern.darkboot: CTLFLAG_ANYBODY permits unprivileged reads; writes are
 * entitlement-checked inside the handler itself. */
SYSCTL_PROC(_kern, OID_AUTO, darkboot,
	    CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
	    0, 0, sysctl_darkboot, "I", "");
#endif
3334
3335 /*
3336 * This is set by core audio to tell tailspin (ie background tracing) how long
3337 * its smallest buffer is. Background tracing can then try to make a reasonable
3338 * decisions to try to avoid introducing so much latency that the buffers will
3339 * underflow.
3340 */
3341
/* Smallest Core Audio buffer length, in microseconds (0 until first set). */
int min_audio_buffer_usec;
3343
3344 STATIC int
3345 sysctl_audio_buffer SYSCTL_HANDLER_ARGS
3346 {
3347 #pragma unused(oidp, arg1, arg2)
3348 int err = 0, value = 0, changed = 0;
3349 err = sysctl_io_number(req, min_audio_buffer_usec, sizeof(int), &value, &changed);
3350 if (err) goto exit;
3351
3352 if (changed) {
3353 /* writing is protected by an entitlement */
3354 if (priv_check_cred(kauth_cred_get(), PRIV_AUDIO_LATENCY, 0) != 0) {
3355 err = EPERM;
3356 goto exit;
3357 }
3358 min_audio_buffer_usec = value;
3359 }
3360 exit:
3361 return err;
3362 }
3363
/* kern.min_audio_buffer_usec: readable/writable by anybody; the handler enforces the write entitlement. */
SYSCTL_PROC(_kern, OID_AUTO, min_audio_buffer_usec, CTLFLAG_RW | CTLFLAG_ANYBODY, 0, 0, sysctl_audio_buffer, "I", "Minimum audio buffer size, in microseconds");
3365
3366 #if DEVELOPMENT || DEBUG
3367 #include <sys/sysent.h>
3368 /* This should result in a fatal exception, verifying that "sysent" is
3369 * write-protected.
3370 */
/* Deliberately store through the (supposedly read-only) sysent table.
 * On a correctly configured kernel the write faults; if the printf below
 * is ever reached, sysent write protection is broken.  Triggered by
 * writing any value to the sysctl. */
static int
kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
	uint64_t new_value = 0, old_value = 0;
	int changed = 0, error;

	error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
	if ((error == 0) && changed) {
		/* volatile ensures the store is actually issued. */
		volatile uint32_t *wraddr = (uint32_t *) &sysent[0];
		*wraddr = 0;
		printf("sysent[0] write succeeded\n");
	}
	return error;
}
3384
SYSCTL_PROC(_kern, OID_AUTO, sysent_const_check,
	    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
	    0, 0,
	    kern_sysent_write, "I", "Attempt sysent[0] write");

#endif

/* kern.development: constant 1 on DEVELOPMENT/DEBUG kernels, else 0. */
#if DEVELOPMENT || DEBUG
SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 1, "");
#else
SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 0, "");
#endif
3397
3398
3399 #if DEVELOPMENT || DEBUG
3400
3401 static int
3402 sysctl_panic_test SYSCTL_HANDLER_ARGS
3403 {
3404 #pragma unused(arg1, arg2)
3405 int rval = 0;
3406 char str[32] = "entry prelog postlog postcore";
3407
3408 rval = sysctl_handle_string(oidp, str, sizeof(str), req);
3409
3410 if (rval == 0 && req->newptr) {
3411 if (strncmp("entry", str, strlen("entry")) == 0) {
3412 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_ENTRY, "test recursive panic at entry");
3413 } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
3414 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_PRELOG, "test recursive panic prior to writing a paniclog");
3415 } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
3416 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTLOG, "test recursive panic subsequent to paniclog");
3417 } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
3418 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTCORE, "test recursive panic subsequent to on-device core");
3419 }
3420 }
3421
3422 return rval;
3423 }
3424
3425 static int
3426 sysctl_debugger_test SYSCTL_HANDLER_ARGS
3427 {
3428 #pragma unused(arg1, arg2)
3429 int rval = 0;
3430 char str[32] = "entry prelog postlog postcore";
3431
3432 rval = sysctl_handle_string(oidp, str, sizeof(str), req);
3433
3434 if (rval == 0 && req->newptr) {
3435 if (strncmp("entry", str, strlen("entry")) == 0) {
3436 DebuggerWithContext(0, NULL, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY);
3437 } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
3438 DebuggerWithContext(0, NULL, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG);
3439 } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
3440 DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG);
3441 } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
3442 DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE);
3443 }
3444 }
3445
3446 return rval;
3447 }
3448
/* Spinlock held forever by the spinlock panic test below. */
decl_lck_spin_data(, spinlock_panic_test_lock)
3450
3451 __attribute__((noreturn))
3452 static void
3453 spinlock_panic_test_acquire_spinlock(void * arg __unused, wait_result_t wres __unused)
3454 {
3455 lck_spin_lock(&spinlock_panic_test_lock);
3456 while (1) { ; }
3457 }
3458
3459 static int
3460 sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS
3461 {
3462 #pragma unused(oidp, arg1, arg2)
3463 if (req->newlen == 0)
3464 return EINVAL;
3465
3466 thread_t panic_spinlock_thread;
3467 /* Initialize panic spinlock */
3468 lck_grp_t * panic_spinlock_grp;
3469 lck_grp_attr_t * panic_spinlock_grp_attr;
3470 lck_attr_t * panic_spinlock_attr;
3471
3472 panic_spinlock_grp_attr = lck_grp_attr_alloc_init();
3473 panic_spinlock_grp = lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr);
3474 panic_spinlock_attr = lck_attr_alloc_init();
3475
3476 lck_spin_init(&spinlock_panic_test_lock, panic_spinlock_grp, panic_spinlock_attr);
3477
3478
3479 /* Create thread to acquire spinlock */
3480 if (kernel_thread_start(spinlock_panic_test_acquire_spinlock, NULL, &panic_spinlock_thread) != KERN_SUCCESS) {
3481 return EBUSY;
3482 }
3483
3484 /* Try to acquire spinlock -- should panic eventually */
3485 lck_spin_lock(&spinlock_panic_test_lock);
3486 while(1) { ; }
3487 }
3488
3489 __attribute__((noreturn))
3490 static void
3491 simultaneous_panic_worker
3492 (void * arg, wait_result_t wres __unused)
3493 {
3494 atomic_int *start_panic = (atomic_int *)arg;
3495
3496 while (!atomic_load(start_panic)) { ; }
3497 panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number());
3498 __builtin_unreachable();
3499 }
3500
3501 static int
3502 sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS
3503 {
3504 #pragma unused(oidp, arg1, arg2)
3505 if (req->newlen == 0)
3506 return EINVAL;
3507
3508 int i = 0, threads_to_create = 2 * processor_count;
3509 atomic_int start_panic = 0;
3510 unsigned int threads_created = 0;
3511 thread_t new_panic_thread;
3512
3513 for (i = threads_to_create; i > 0; i--) {
3514 if (kernel_thread_start(simultaneous_panic_worker, (void *) &start_panic, &new_panic_thread) == KERN_SUCCESS) {
3515 threads_created++;
3516 }
3517 }
3518
3519 /* FAIL if we couldn't create at least processor_count threads */
3520 if (threads_created < processor_count) {
3521 panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %d (of %d)",
3522 threads_created, threads_to_create);
3523 }
3524
3525 atomic_exchange(&start_panic, 1);
3526 while (1) { ; }
3527 }
3528
/* Debug-only test triggers: each takes a string subcommand and does not
 * return on success (the machine panics or enters the debugger). */
SYSCTL_PROC(_debug, OID_AUTO, panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_panic_test, "A", "panic test");
SYSCTL_PROC(_debug, OID_AUTO, debugger_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_debugger_test, "A", "debugger test");
SYSCTL_PROC(_debug, OID_AUTO, spinlock_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_spinlock_panic_test, "A", "spinlock panic test");
SYSCTL_PROC(_debug, OID_AUTO, simultaneous_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_simultaneous_panic_test, "A", "simultaneous panic test");
3533
3534
3535 #endif /* DEVELOPMENT || DEBUG */
3536
/* Thread groups are not supported in this configuration (constant 0). */
const uint32_t thread_groups_supported = 0;
3538
3539 STATIC int
3540 sysctl_thread_groups_supported (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3541 {
3542 int value = thread_groups_supported;
3543 return sysctl_io_number(req, value, sizeof(value), NULL, NULL);
3544 }
3545
SYSCTL_PROC(_kern, OID_AUTO, thread_groups_supported, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_KERN,
	    0, 0, &sysctl_thread_groups_supported, "I", "thread groups supported");
3548
3549 static int
3550 sysctl_grade_cputype SYSCTL_HANDLER_ARGS
3551 {
3552 #pragma unused(arg1, arg2, oidp)
3553 int error = 0;
3554 int type_tuple[2] = {};
3555 int return_value = 0;
3556
3557 error = SYSCTL_IN(req, &type_tuple, sizeof(type_tuple));
3558
3559 if (error) {
3560 return error;
3561 }
3562
3563 return_value = grade_binary(type_tuple[0], type_tuple[1]);
3564
3565 error = SYSCTL_OUT(req, &return_value, sizeof(return_value));
3566
3567 if (error) {
3568 return error;
3569 }
3570
3571 return error;
3572 }
3573
/* kern.grade_cputype: opaque read/write OID; see sysctl_grade_cputype. */
SYSCTL_PROC(_kern, OID_AUTO, grade_cputype,
	    CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED|CTLTYPE_OPAQUE,
	    0, 0, &sysctl_grade_cputype, "S",
	    "grade value of cpu_type_t+cpu_sub_type_t");