]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_sysctl.c
xnu-4570.61.1.tar.gz
[apple/xnu.git] / bsd / kern / kern_sysctl.c
1 /*
2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
66 /*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
72
73 /*
74 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
84 */
85
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
95 #include <sys/buf.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
98 #include <sys/tty.h>
99 #include <sys/disklabel.h>
100 #include <sys/vm.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105 #include <sys/memory_maintenance.h>
106 #include <sys/priv.h>
107 #include <stdatomic.h>
108
109 #include <security/audit/audit.h>
110 #include <kern/kalloc.h>
111
112 #include <machine/smp.h>
113 #include <mach/machine.h>
114 #include <mach/mach_host.h>
115 #include <mach/mach_types.h>
116 #include <mach/processor_info.h>
117 #include <mach/vm_param.h>
118 #include <kern/debug.h>
119 #include <kern/mach_param.h>
120 #include <kern/task.h>
121 #include <kern/thread.h>
122 #include <kern/thread_group.h>
123 #include <kern/processor.h>
124 #include <kern/cpu_number.h>
125 #include <kern/debug.h>
126 #include <kern/sched_prim.h>
127 #include <vm/vm_kern.h>
128 #include <vm/vm_map.h>
129 #include <mach/host_info.h>
130
131 #include <sys/mount_internal.h>
132 #include <sys/kdebug.h>
133
134 #include <IOKit/IOPlatformExpert.h>
135 #include <pexpert/pexpert.h>
136
137 #include <machine/machine_routines.h>
138 #include <machine/exec.h>
139
140 #include <vm/vm_protos.h>
141 #include <vm/vm_pageout.h>
142 #include <vm/vm_compressor_algorithms.h>
143 #include <sys/imgsrc.h>
144 #include <kern/timer_call.h>
145
146 #if defined(__i386__) || defined(__x86_64__)
147 #include <i386/cpuid.h>
148 #endif
149
150 #if CONFIG_FREEZE
151 #include <sys/kern_memorystatus.h>
152 #endif
153
154 #if KPERF
155 #include <kperf/kperf.h>
156 #endif
157
158 #if HYPERVISOR
159 #include <kern/hv_support.h>
160 #endif
161
162 /*
163 * deliberately setting max requests to really high number
164 * so that runaway settings do not cause MALLOC overflows
165 */
166 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
167
168 extern int aio_max_requests;
169 extern int aio_max_requests_per_process;
170 extern int aio_worker_threads;
171 extern int lowpri_IO_window_msecs;
172 extern int lowpri_IO_delay_msecs;
173 extern int nx_enabled;
174 extern int speculative_reads_disabled;
175 extern unsigned int speculative_prefetch_max;
176 extern unsigned int speculative_prefetch_max_iosize;
177 extern unsigned int preheat_max_bytes;
178 extern unsigned int preheat_min_bytes;
179 extern long numvnodes;
180
181 extern uuid_string_t bootsessionuuid_string;
182
183 extern unsigned int vm_max_delayed_work_limit;
184 extern unsigned int vm_max_batch;
185
186 extern unsigned int vm_page_free_min;
187 extern unsigned int vm_page_free_target;
188 extern unsigned int vm_page_free_reserved;
189 extern unsigned int vm_page_speculative_percentage;
190 extern unsigned int vm_page_speculative_q_age_ms;
191
192 #if (DEVELOPMENT || DEBUG)
193 extern uint32_t vm_page_creation_throttled_hard;
194 extern uint32_t vm_page_creation_throttled_soft;
195 #endif /* DEVELOPMENT || DEBUG */
196
197 /*
198 * Conditionally allow dtrace to see these functions for debugging purposes.
199 */
200 #ifdef STATIC
201 #undef STATIC
202 #endif
203 #if 0
204 #define STATIC
205 #else
206 #define STATIC static
207 #endif
208
209 extern boolean_t mach_timer_coalescing_enabled;
210
211 extern uint64_t timer_deadline_tracking_bin_1, timer_deadline_tracking_bin_2;
212
213 STATIC void
214 fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
215 STATIC void
216 fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
217 STATIC void
218 fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
219 STATIC void
220 fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
221 STATIC void
222 fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
223 STATIC void
224 fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);
225
226 extern int
227 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
228 #if NFSCLIENT
229 extern int
230 netboot_root(void);
231 #endif
232 int
233 pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
234 proc_t p);
235 int
236 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
237 size_t *sizep, proc_t cur_proc);
238 STATIC int
239 sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
240 proc_t cur_proc, int argc_yes);
241 int
242 sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
243 size_t newlen, void *sp, int len);
244
245 STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
246 STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
247 STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
248 STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
249 STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
250 int sysdoproc_callback(proc_t p, void *arg);
251
252
253 /* forward declarations for non-static STATIC */
254 STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
255 STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
256 STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
257 STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
258 STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
259 STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
260 #if COUNT_SYSCALLS
261 STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
262 #endif /* COUNT_SYSCALLS */
263 #if !CONFIG_EMBEDDED
264 STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
265 #endif /* !CONFIG_EMBEDDED */
266 STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
267 STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
268 STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
269 STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
270 STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
271 STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
272 STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
273 STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
274 STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
275 STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
276 STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
277 STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
278 STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
279 STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
280 STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
281 #if NFSCLIENT
282 STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
283 #endif
284 #ifdef CONFIG_IMGSRC_ACCESS
285 STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
286 #endif
287 STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
288 STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
289 #if CONFIG_COREDUMP
290 STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
291 STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
292 #endif
293 STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
294 STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
295 STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
296 STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
297 STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
298 STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
299 STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
300 STATIC int fetch_process_cputype( proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
301 STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
302 STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
303 STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
304 STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
305 STATIC int sysctl_minimalboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
306 STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
307
308
309 extern void IORegistrySetOSBuildVersion(char * build_version);
310
311 STATIC void
312 fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
313 {
314 la64->ldavg[0] = la->ldavg[0];
315 la64->ldavg[1] = la->ldavg[1];
316 la64->ldavg[2] = la->ldavg[2];
317 la64->fscale = (user64_long_t)la->fscale;
318 }
319
320 STATIC void
321 fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
322 {
323 la32->ldavg[0] = la->ldavg[0];
324 la32->ldavg[1] = la->ldavg[1];
325 la32->ldavg[2] = la->ldavg[2];
326 la32->fscale = (user32_long_t)la->fscale;
327 }
328
329 #if CONFIG_COREDUMP
330 /*
331 * Attributes stored in the kernel.
332 */
333 extern char corefilename[MAXPATHLEN+1];
334 extern int do_coredump;
335 extern int sugid_coredump;
336 #endif
337
338 #if COUNT_SYSCALLS
339 extern int do_count_syscalls;
340 #endif
341
342 #ifdef INSECURE
343 int securelevel = -1;
344 #else
345 int securelevel;
346 #endif
347
/*
 * Get or set the name of the calling thread (kern.threadname).
 *
 * Read:  copies out the current thread name WITHOUT a NUL terminator
 *        (strlen semantics) and reports its length via req->oldidx.
 *        If no name has been set yet, the advertised length is
 *        MAXTHREADNAMESIZE - 1.
 * Write: copies in a new name of at most MAXTHREADNAMESIZE - 1 bytes,
 *        allocating the per-uthread name buffer on first use.
 */
STATIC int
sysctl_handle_kern_threadname( __unused struct sysctl_oid *oidp, __unused void *arg1,
	  __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct uthread *ut = get_bsdthread_info(current_thread());
	user_addr_t oldp=0, newp=0;
	size_t *oldlenp=NULL;
	size_t newlen=0;

	oldp = req->oldptr;
	oldlenp = &(req->oldlen);
	newp = req->newptr;
	newlen = req->newlen;

	/* We want the current length, and maybe the string itself */
	if(oldlenp) {
		/* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
		size_t currlen = MAXTHREADNAMESIZE - 1;

		if(ut->pth_name)
			/* use length of current thread name */
			currlen = strlen(ut->pth_name);
		if(oldp) {
			if(*oldlenp < currlen)
				return ENOMEM;
			/* NOTE - we do not copy the NULL terminator */
			if(ut->pth_name) {
				error = copyout(ut->pth_name,oldp,currlen);
				if(error)
					return error;
			}
		}
		/* return length of thread name minus NULL terminator (just like strlen) */
		req->oldidx = currlen;
	}

	/* We want to set the name to something */
	if(newp)
	{
		if(newlen > (MAXTHREADNAMESIZE - 1))
			return ENAMETOOLONG;
		if(!ut->pth_name)
		{
			/* first name for this thread: allocate the buffer lazily */
			ut->pth_name = (char*)kalloc( MAXTHREADNAMESIZE );
			if(!ut->pth_name)
				return ENOMEM;
		} else {
			/* trace the previous name before it is overwritten */
			kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, ut->pth_name);
		}
		/*
		 * Zero-fill first so the copied-in name is always NUL
		 * terminated (newlen <= MAXTHREADNAMESIZE - 1).
		 * NOTE(review): if copyin fails below, the old name has
		 * already been cleared and the thread is left unnamed.
		 */
		bzero(ut->pth_name, MAXTHREADNAMESIZE);
		error = copyin(newp, ut->pth_name, newlen);
		if (error) {
			return error;
		}

		kernel_debug_string_simple(TRACE_STRING_THREADNAME, ut->pth_name);
	}

	return 0;
}

SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname,"A","");
411
412 #define BSD_HOST 1
413 STATIC int
414 sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
415 {
416 host_basic_info_data_t hinfo;
417 kern_return_t kret;
418 uint32_t size;
419 int changed;
420 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
421 struct _processor_statistics_np *buf;
422 int error;
423
424 kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
425 if (kret != KERN_SUCCESS) {
426 return EINVAL;
427 }
428
429 size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */
430
431 if (req->oldlen < size) {
432 return EINVAL;
433 }
434
435 MALLOC(buf, struct _processor_statistics_np*, size, M_TEMP, M_ZERO | M_WAITOK);
436
437 kret = get_sched_statistics(buf, &size);
438 if (kret != KERN_SUCCESS) {
439 error = EINVAL;
440 goto out;
441 }
442
443 error = sysctl_io_opaque(req, buf, size, &changed);
444 if (error) {
445 goto out;
446 }
447
448 if (changed) {
449 panic("Sched info changed?!");
450 }
451 out:
452 FREE(buf, M_TEMP);
453 return error;
454 }
455
456 SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");
457
458 STATIC int
459 sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
460 {
461 boolean_t active;
462 int res;
463
464 if (req->newlen != sizeof(active)) {
465 return EINVAL;
466 }
467
468 res = copyin(req->newptr, &active, sizeof(active));
469 if (res != 0) {
470 return res;
471 }
472
473 return set_sched_stats_active(active);
474 }
475
476 SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
477
478 extern uint32_t sched_debug_flags;
479 SYSCTL_INT(_debug, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_debug_flags, 0, "scheduler debug");
480
481 #if (DEBUG || DEVELOPMENT)
482 extern boolean_t doprnt_hide_pointers;
483 SYSCTL_INT(_debug, OID_AUTO, hide_kernel_pointers, CTLFLAG_RW | CTLFLAG_LOCKED, &doprnt_hide_pointers, 0, "hide kernel pointers from log");
484 #endif
485
486 extern int get_kernel_symfile(proc_t, char **);
487
488 #if COUNT_SYSCALLS
489 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
490
491 extern unsigned int nsysent;
492 extern int syscalls_log[];
493 extern const char *syscallnames[];
494
495 STATIC int
496 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
497 {
498 __unused int cmd = oidp->oid_arg2; /* subcommand*/
499 __unused int *name = arg1; /* oid element argument vector */
500 __unused int namelen = arg2; /* number of oid element arguments */
501 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
502 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
503 user_addr_t newp = req->newptr; /* user buffer copy in address */
504 size_t newlen = req->newlen; /* user buffer copy in size */
505 int error;
506
507 int tmp;
508
509 /* valid values passed in:
510 * = 0 means don't keep called counts for each bsd syscall
511 * > 0 means keep called counts for each bsd syscall
512 * = 2 means dump current counts to the system log
513 * = 3 means reset all counts
514 * for example, to dump current counts:
515 * sysctl -w kern.count_calls=2
516 */
517 error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
518 if ( error != 0 ) {
519 return (error);
520 }
521
522 if ( tmp == 1 ) {
523 do_count_syscalls = 1;
524 }
525 else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
526 int i;
527 for ( i = 0; i < nsysent; i++ ) {
528 if ( syscalls_log[i] != 0 ) {
529 if ( tmp == 2 ) {
530 printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
531 }
532 else {
533 syscalls_log[i] = 0;
534 }
535 }
536 }
537 if ( tmp != 0 ) {
538 do_count_syscalls = 1;
539 }
540 }
541
542 /* adjust index so we return the right required/consumed amount */
543 if (!error)
544 req->oldidx += req->oldlen;
545
546 return (error);
547 }
548 SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
549 0, /* Pointer argument (arg1) */
550 0, /* Integer argument (arg2) */
551 sysctl_docountsyscalls, /* Handler function */
552 NULL, /* Data pointer */
553 "");
554 #endif /* COUNT_SYSCALLS */
555
556 /*
557 * The following sysctl_* functions should not be used
558 * any more, as they can only cope with callers in
559 * user mode: Use new-style
560 * sysctl_io_number()
561 * sysctl_io_string()
562 * sysctl_io_opaque()
563 * instead.
564 */
565
566 /*
567 * Validate parameters and get old / set new parameters
568 * for an integer-valued sysctl function.
569 */
570 int
571 sysctl_int(user_addr_t oldp, size_t *oldlenp,
572 user_addr_t newp, size_t newlen, int *valp)
573 {
574 int error = 0;
575
576 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
577 return (EFAULT);
578 if (oldp && *oldlenp < sizeof(int))
579 return (ENOMEM);
580 if (newp && newlen != sizeof(int))
581 return (EINVAL);
582 *oldlenp = sizeof(int);
583 if (oldp)
584 error = copyout(valp, oldp, sizeof(int));
585 if (error == 0 && newp) {
586 error = copyin(newp, valp, sizeof(int));
587 AUDIT_ARG(value32, *valp);
588 }
589 return (error);
590 }
591
592 /*
593 * Validate parameters and get old / set new parameters
594 * for an quad(64bit)-valued sysctl function.
595 */
596 int
597 sysctl_quad(user_addr_t oldp, size_t *oldlenp,
598 user_addr_t newp, size_t newlen, quad_t *valp)
599 {
600 int error = 0;
601
602 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
603 return (EFAULT);
604 if (oldp && *oldlenp < sizeof(quad_t))
605 return (ENOMEM);
606 if (newp && newlen != sizeof(quad_t))
607 return (EINVAL);
608 *oldlenp = sizeof(quad_t);
609 if (oldp)
610 error = copyout(valp, oldp, sizeof(quad_t));
611 if (error == 0 && newp)
612 error = copyin(newp, valp, sizeof(quad_t));
613 return (error);
614 }
615
616 STATIC int
617 sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
618 {
619 if (p->p_pid != (pid_t)*(int*)arg)
620 return(0);
621 else
622 return(1);
623 }
624
625 STATIC int
626 sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
627 {
628 if (p->p_pgrpid != (pid_t)*(int*)arg)
629 return(0);
630 else
631 return(1);
632 }
633
634 STATIC int
635 sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
636 {
637 int retval;
638 struct tty *tp;
639
640 /* This is very racy but list lock is held.. Hmmm. */
641 if ((p->p_flag & P_CONTROLT) == 0 ||
642 (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
643 (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
644 tp->t_dev != (dev_t)*(int*)arg)
645 retval = 0;
646 else
647 retval = 1;
648
649 return(retval);
650 }
651
652 STATIC int
653 sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
654 {
655 kauth_cred_t my_cred;
656 uid_t uid;
657
658 if (p->p_ucred == NULL)
659 return(0);
660 my_cred = kauth_cred_proc_ref(p);
661 uid = kauth_cred_getuid(my_cred);
662 kauth_cred_unref(&my_cred);
663
664 if (uid != (uid_t)*(int*)arg)
665 return(0);
666 else
667 return(1);
668 }
669
670
671 STATIC int
672 sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
673 {
674 kauth_cred_t my_cred;
675 uid_t ruid;
676
677 if (p->p_ucred == NULL)
678 return(0);
679 my_cred = kauth_cred_proc_ref(p);
680 ruid = kauth_cred_getruid(my_cred);
681 kauth_cred_unref(&my_cred);
682
683 if (ruid != (uid_t)*(int*)arg)
684 return(0);
685 else
686 return(1);
687 }
688
/*
 * try over estimating by 5 procs
 */
#define KERN_PROCSLOP	(5 * sizeof (struct kinfo_proc))
/* Per-walk state shared between sysctl_prochandle and its callback. */
struct sysdoproc_args {
	int	buflen;			/* remaining bytes in user buffer */
	void	*kprocp;		/* scratch record (32- or 64-bit layout) */
	boolean_t is_64_bit;		/* caller is a 64-bit process */
	user_addr_t	dp;		/* current copyout position */
	size_t needed;			/* total bytes required for full result */
	int sizeof_kproc;		/* size of one record in chosen layout */
	int *errorp;			/* out: first copyout error, if any */
	int uidcheck;			/* filter by effective uid */
	int ruidcheck;			/* filter by real uid */
	int ttycheck;			/* filter by controlling tty device */
	int uidval;			/* key for whichever filter is enabled */
};

/*
 * proc_iterate() callback: copy out one kinfo_proc record per matching
 * process.  Once the user buffer is exhausted, keeps accumulating
 * 'needed' (without copying) so the caller can report the required size.
 */
int
sysdoproc_callback(proc_t p, void *arg)
{
	struct sysdoproc_args *args = arg;

	if (args->buflen >= args->sizeof_kproc) {
		/* apply whichever post-iteration filter was requested */
		if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0))
			return (PROC_RETURNED);
		if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0))
			return (PROC_RETURNED);
		if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0))
			return (PROC_RETURNED);

		/* fill the scratch record in the caller's ABI layout */
		bzero(args->kprocp, args->sizeof_kproc);
		if (args->is_64_bit)
			fill_user64_proc(p, args->kprocp);
		else
			fill_user32_proc(p, args->kprocp);
		int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
		if (error) {
			*args->errorp = error;
			return (PROC_RETURNED_DONE);	/* abort the walk */
		}
		args->dp += args->sizeof_kproc;
		args->buflen -= args->sizeof_kproc;
	}
	args->needed += args->sizeof_kproc;
	return (PROC_RETURNED);
}
736
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
/*
 * Common handler for the kern.proc.* sysctls (all/pid/tty/pgrp/uid/ruid).
 * The subcommand arrives in oidp->oid_arg2; name[0] (when present) is the
 * filter key.  Copies out an array of kinfo_proc records sized for the
 * caller's ABI; when oldp is NULL, reports the space needed instead
 * (padded by KERN_PROCSLOP).
 */
STATIC int
sysctl_prochandle SYSCTL_HANDLER_ARGS
{
	int cmd = oidp->oid_arg2;	/* subcommand for multiple nodes */
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t where = req->oldptr;/* user buffer copy out address */

	user_addr_t dp = where;
	size_t needed = 0;
	int buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
	int error = 0;
	boolean_t is_64_bit = proc_is64bit(current_proc());
	struct user32_kinfo_proc  user32_kproc;
	struct user64_kinfo_proc  user_kproc;
	int sizeof_kproc;
	void *kprocp;
	int (*filterfn)(proc_t, void *) = 0;
	struct sysdoproc_args args;
	int uidcheck = 0;
	int ruidcheck = 0;
	int ttycheck = 0;
	int success = 0;

	/* every subcommand except KERN_PROC_ALL takes exactly one argument */
	if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL))
		return (EINVAL);

	/* pick the record layout matching the caller's ABI */
	if (is_64_bit) {
		sizeof_kproc = sizeof(user_kproc);
		kprocp = &user_kproc;
	} else {
		sizeof_kproc = sizeof(user32_kproc);
		kprocp = &user32_kproc;
	}

	/*
	 * PID and PGRP filter during iteration via filterfn; the uid/ruid/
	 * tty filters are applied later inside sysdoproc_callback.
	 */
	switch (cmd) {

	case KERN_PROC_PID:
		filterfn = sysdoproc_filt_KERN_PROC_PID;
		break;

	case KERN_PROC_PGRP:
		filterfn = sysdoproc_filt_KERN_PROC_PGRP;
		break;

	case KERN_PROC_TTY:
		ttycheck = 1;
		break;

	case KERN_PROC_UID:
		uidcheck = 1;
		break;

	case KERN_PROC_RUID:
		ruidcheck = 1;
		break;

	case KERN_PROC_ALL:
		break;

	default:
		/* must be kern.proc.<unknown> */
		return (ENOTSUP);
	}

	error = 0;
	args.buflen = buflen;
	args.kprocp = kprocp;
	args.is_64_bit = is_64_bit;
	args.dp = dp;
	args.needed = needed;
	args.errorp = &error;
	args.uidcheck = uidcheck;
	args.ruidcheck = ruidcheck;
	args.ttycheck = ttycheck;
	args.sizeof_kproc = sizeof_kproc;
	if (namelen)
		args.uidval = name[0];

	success = proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
	    sysdoproc_callback, &args, filterfn, name);

	/*
	 * rdar://problem/28433391: if we can't iterate over the processes,
	 * make sure to return an error.
	 */

	if (success != 0)
		return (ENOMEM);

	if (error)
		return (error);

	dp = args.dp;
	needed = args.needed;

	if (where != USER_ADDR_NULL) {
		/* report bytes actually copied; ENOMEM if output was truncated */
		req->oldlen = dp - where;
		if (needed > req->oldlen)
			return (ENOMEM);
	} else {
		/* size probe: over-estimate to absorb processes created meanwhile */
		needed += KERN_PROCSLOP;
		req->oldlen = needed;
	}
	/* adjust index so we return the right required/consumed amount */
	req->oldidx += req->oldlen;
	return (0);
}
846
847 /*
848 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
849 * in the sysctl declaration itself, which comes into the handler function
850 * as 'oidp->oid_arg2'.
851 *
852 * For these particular sysctls, since they have well known OIDs, we could
853 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
854 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
855 * of a well known value with a common handler function. This is desirable,
856 * because we want well known values to "go away" at some future date.
857 *
858 * It should be noted that the value of '((int *)arg1)[1]' is used for many
859 * an integer parameter to the subcommand for many of these sysctls; we'd
860 * rather have used '((int *)arg1)[0]' for that, or even better, an element
861 * in a structure passed in as the the 'newp' argument to sysctlbyname(3),
862 * and then use leaf-node permissions enforcement, but that would have
863 * necessitated modifying user space code to correspond to the interface
864 * change, and we are striving for binary backward compatibility here; even
865 * though these are SPI, and not intended for use by user space applications
866 * which are not themselves system tools or libraries, some applications
867 * have erroneously used them.
868 */
869 SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
870 0, /* Pointer argument (arg1) */
871 KERN_PROC_ALL, /* Integer argument (arg2) */
872 sysctl_prochandle, /* Handler function */
873 NULL, /* Data is size variant on ILP32/LP64 */
874 "");
875 SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
876 0, /* Pointer argument (arg1) */
877 KERN_PROC_PID, /* Integer argument (arg2) */
878 sysctl_prochandle, /* Handler function */
879 NULL, /* Data is size variant on ILP32/LP64 */
880 "");
881 SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
882 0, /* Pointer argument (arg1) */
883 KERN_PROC_TTY, /* Integer argument (arg2) */
884 sysctl_prochandle, /* Handler function */
885 NULL, /* Data is size variant on ILP32/LP64 */
886 "");
887 SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
888 0, /* Pointer argument (arg1) */
889 KERN_PROC_PGRP, /* Integer argument (arg2) */
890 sysctl_prochandle, /* Handler function */
891 NULL, /* Data is size variant on ILP32/LP64 */
892 "");
893 SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
894 0, /* Pointer argument (arg1) */
895 KERN_PROC_UID, /* Integer argument (arg2) */
896 sysctl_prochandle, /* Handler function */
897 NULL, /* Data is size variant on ILP32/LP64 */
898 "");
899 SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
900 0, /* Pointer argument (arg1) */
901 KERN_PROC_RUID, /* Integer argument (arg2) */
902 sysctl_prochandle, /* Handler function */
903 NULL, /* Data is size variant on ILP32/LP64 */
904 "");
905 SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
906 0, /* Pointer argument (arg1) */
907 KERN_PROC_LCID, /* Integer argument (arg2) */
908 sysctl_prochandle, /* Handler function */
909 NULL, /* Data is size variant on ILP32/LP64 */
910 "");
911
912
/*
 * Fill in non-zero fields of an ILP32 eproc structure for the specified
 * process.  The caller provides a zeroed 'ep'; only meaningful fields
 * are written (which is why only e_tdev has an explicit default).
 */
STATIC void
fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
{
	struct tty *tp;
	struct pgrp *pg;
	struct session *sessp;
	kauth_cred_t my_cred;

	/* both return referenced objects; released at the bottom */
	pg = proc_pgrp(p);
	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	}
	ep->e_ppid = p->p_ppid;
	if (p->p_ucred) {
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
		ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
		ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
		ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
		bcopy(posix_cred_get(my_cred)->cr_groups,
			ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));

		kauth_cred_unref(&my_cred);
	}

	/* controlling terminal, if the session still has one */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	    (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
	} else
		ep->e_tdev = NODEV;

	if (sessp != SESSION_NULL) {
		if (SESS_LEADER(p, sessp))
			ep->e_flag |= EPROC_SLEADER;
		session_rele(sessp);	/* drop proc_session() reference */
	}
	if (pg != PGRP_NULL)
		pg_rele(pg);		/* drop proc_pgrp() reference */
}
968
969 /*
970 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
971 */
972 STATIC void
973 fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
974 {
975 struct tty *tp;
976 struct pgrp *pg;
977 struct session *sessp;
978 kauth_cred_t my_cred;
979
980 pg = proc_pgrp(p);
981 sessp = proc_session(p);
982
983 if (pg != PGRP_NULL) {
984 ep->e_pgid = p->p_pgrpid;
985 ep->e_jobc = pg->pg_jobc;
986 if (sessp != SESSION_NULL && sessp->s_ttyvp)
987 ep->e_flag = EPROC_CTTY;
988 }
989 ep->e_ppid = p->p_ppid;
990 if (p->p_ucred) {
991 my_cred = kauth_cred_proc_ref(p);
992
993 /* A fake historical pcred */
994 ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
995 ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
996 ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
997 ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);
998
999 /* A fake historical *kauth_cred_t */
1000 ep->e_ucred.cr_ref = my_cred->cr_ref;
1001 ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
1002 ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
1003 bcopy(posix_cred_get(my_cred)->cr_groups,
1004 ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));
1005
1006 kauth_cred_unref(&my_cred);
1007 }
1008
1009 if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
1010 (tp = SESSION_TP(sessp))) {
1011 ep->e_tdev = tp->t_dev;
1012 ep->e_tpgid = sessp->s_ttypgrpid;
1013 } else
1014 ep->e_tdev = NODEV;
1015
1016 if (sessp != SESSION_NULL) {
1017 if (SESS_LEADER(p, sessp))
1018 ep->e_flag |= EPROC_SLEADER;
1019 session_rele(sessp);
1020 }
1021 if (pg != PGRP_NULL)
1022 pg_rele(pg);
1023 }
1024
1025 /*
1026 * Fill in an eproc structure for the specified process.
1027 * bzeroed by our caller, so only set non-zero fields.
1028 */
1029 STATIC void
1030 fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
1031 {
1032 exp->p_starttime.tv_sec = p->p_start.tv_sec;
1033 exp->p_starttime.tv_usec = p->p_start.tv_usec;
1034 exp->p_flag = p->p_flag;
1035 if (p->p_lflag & P_LTRACED)
1036 exp->p_flag |= P_TRACED;
1037 if (p->p_lflag & P_LPPWAIT)
1038 exp->p_flag |= P_PPWAIT;
1039 if (p->p_lflag & P_LEXIT)
1040 exp->p_flag |= P_WEXIT;
1041 exp->p_stat = p->p_stat;
1042 exp->p_pid = p->p_pid;
1043 exp->p_oppid = p->p_oppid;
1044 /* Mach related */
1045 exp->user_stack = p->user_stack;
1046 exp->p_debugger = p->p_debugger;
1047 exp->sigwait = p->sigwait;
1048 /* scheduling */
1049 #ifdef _PROC_HAS_SCHEDINFO_
1050 exp->p_estcpu = p->p_estcpu;
1051 exp->p_pctcpu = p->p_pctcpu;
1052 exp->p_slptime = p->p_slptime;
1053 #endif
1054 exp->p_realtimer.it_interval.tv_sec =
1055 (user32_time_t)p->p_realtimer.it_interval.tv_sec;
1056 exp->p_realtimer.it_interval.tv_usec =
1057 (__int32_t)p->p_realtimer.it_interval.tv_usec;
1058
1059 exp->p_realtimer.it_value.tv_sec =
1060 (user32_time_t)p->p_realtimer.it_value.tv_sec;
1061 exp->p_realtimer.it_value.tv_usec =
1062 (__int32_t)p->p_realtimer.it_value.tv_usec;
1063
1064 exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
1065 exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;
1066
1067 exp->p_sigignore = p->p_sigignore;
1068 exp->p_sigcatch = p->p_sigcatch;
1069 exp->p_priority = p->p_priority;
1070 exp->p_nice = p->p_nice;
1071 bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
1072 exp->p_xstat = p->p_xstat;
1073 exp->p_acflag = p->p_acflag;
1074 }
1075
1076 /*
1077 * Fill in an LP64 version of extern_proc structure for the specified process.
1078 */
1079 STATIC void
1080 fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
1081 {
1082 exp->p_starttime.tv_sec = p->p_start.tv_sec;
1083 exp->p_starttime.tv_usec = p->p_start.tv_usec;
1084 exp->p_flag = p->p_flag;
1085 if (p->p_lflag & P_LTRACED)
1086 exp->p_flag |= P_TRACED;
1087 if (p->p_lflag & P_LPPWAIT)
1088 exp->p_flag |= P_PPWAIT;
1089 if (p->p_lflag & P_LEXIT)
1090 exp->p_flag |= P_WEXIT;
1091 exp->p_stat = p->p_stat;
1092 exp->p_pid = p->p_pid;
1093 exp->p_oppid = p->p_oppid;
1094 /* Mach related */
1095 exp->user_stack = p->user_stack;
1096 exp->p_debugger = p->p_debugger;
1097 exp->sigwait = p->sigwait;
1098 /* scheduling */
1099 #ifdef _PROC_HAS_SCHEDINFO_
1100 exp->p_estcpu = p->p_estcpu;
1101 exp->p_pctcpu = p->p_pctcpu;
1102 exp->p_slptime = p->p_slptime;
1103 #endif
1104 exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
1105 exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
1106
1107 exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
1108 exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
1109
1110 exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
1111 exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
1112
1113 exp->p_sigignore = p->p_sigignore;
1114 exp->p_sigcatch = p->p_sigcatch;
1115 exp->p_priority = p->p_priority;
1116 exp->p_nice = p->p_nice;
1117 bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
1118 exp->p_xstat = p->p_xstat;
1119 exp->p_acflag = p->p_acflag;
1120 }
1121
/*
 * Populate a full ILP32 kinfo_proc (extern_proc + eproc) for process p.
 * The destination must already be zeroed by the caller.
 */
STATIC void
fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
{
	/* on a 64 bit kernel, 32 bit users get some truncated information */
	fill_user32_externproc(p, &kp->kp_proc);
	fill_user32_eproc(p, &kp->kp_eproc);
}
1129
/*
 * Populate a full LP64 kinfo_proc (extern_proc + eproc) for process p.
 * The destination must already be zeroed by the caller.
 */
STATIC void
fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
{
	fill_user64_externproc(p, &kp->kp_proc);
	fill_user64_eproc(p, &kp->kp_eproc);
}
1136
/*
 * sysctl handler for the kern.kdebug node: validates that the subcommand
 * in name[0] is one of the supported kdebug trace operations and forwards
 * it to kdbg_control(), which does the real work.  Returns ENOTSUP for
 * an empty name vector or an unrecognized subcommand.
 */
STATIC int
sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
//	size_t newlen = req->newlen;	/* user buffer copy in size */

	int ret=0;

	if (namelen == 0)
		return(ENOTSUP);

	switch(name[0]) {
	case KERN_KDEFLAGS:
	case KERN_KDDFLAGS:
	case KERN_KDENABLE:
	case KERN_KDGETBUF:
	case KERN_KDSETUP:
	case KERN_KDREMOVE:
	case KERN_KDSETREG:
	case KERN_KDGETREG:
	case KERN_KDREADTR:
	case KERN_KDWRITETR:
	case KERN_KDWRITEMAP:
	case KERN_KDTEST:
	case KERN_KDPIDTR:
	case KERN_KDTHRMAP:
	case KERN_KDPIDEX:
	case KERN_KDSETBUF:
	case KERN_KDREADCURTHRMAP:
	case KERN_KDSET_TYPEFILTER:
	case KERN_KDBUFWAIT:
	case KERN_KDCPUMAP:
	case KERN_KDWRITEMAP_V3:
	case KERN_KDWRITETR_V3:
		ret = kdbg_control(name, namelen, oldp, oldlenp);
		break;
	default:
		ret= ENOTSUP;
		break;
	}

	/* adjust index so we return the right required/consumed amount */
	if (!ret)
		req->oldidx += req->oldlen;

	return (ret);
}
SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_kdebug_ops,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1195
1196
1197 #if !CONFIG_EMBEDDED
1198 /*
1199 * Return the top *sizep bytes of the user stack, or the entire area of the
1200 * user stack down through the saved exec_path, whichever is smaller.
1201 */
1202 STATIC int
1203 sysctl_doprocargs SYSCTL_HANDLER_ARGS
1204 {
1205 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1206 int *name = arg1; /* oid element argument vector */
1207 int namelen = arg2; /* number of oid element arguments */
1208 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1209 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1210 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1211 // size_t newlen = req->newlen; /* user buffer copy in size */
1212 int error;
1213
1214 error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 0);
1215
1216 /* adjust index so we return the right required/consumed amount */
1217 if (!error)
1218 req->oldidx += req->oldlen;
1219
1220 return (error);
1221 }
/* kern.procargs: legacy interface, no argc word prepended to the data */
SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_doprocargs,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1228 #endif /* !CONFIG_EMBEDDED */
1229
1230 STATIC int
1231 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1232 {
1233 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1234 int *name = arg1; /* oid element argument vector */
1235 int namelen = arg2; /* number of oid element arguments */
1236 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1237 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1238 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1239 // size_t newlen = req->newlen; /* user buffer copy in size */
1240 int error;
1241
1242 error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 1);
1243
1244 /* adjust index so we return the right required/consumed amount */
1245 if (!error)
1246 req->oldidx += req->oldlen;
1247
1248 return (error);
1249 }
/* kern.procargs2: modern interface, argc word precedes the args data */
SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_doprocargs2,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1256
/*
 * sysctl_procargsx
 *
 * Common backend for KERN_PROCARGS and KERN_PROCARGS2: copy out the saved
 * argument/environment area from the top of the target process' user stack.
 *
 * Parameters:	name		oid vector; name[0] is the target pid
 *		namelen		number of oid elements (must be >= 1)
 *		where		user buffer to copy to, or USER_ADDR_NULL to
 *				only report the size needed via *sizep
 *		sizep		in: caller's buffer size; out: bytes
 *				copied out (or bytes needed)
 *		cur_proc	the calling process (for the privilege check)
 *		argc_yes	non-zero (PROCARGS2): prepend the saved argc
 *				as the first word of the result
 *
 * Returns:	0		success
 *		EINVAL		bad args, no such process, or access denied
 *		EFAULT		size-only query with NULL sizep
 *		ENOMEM		no kernel VA for the bounce buffer
 *		EIO		target address space could not be copied
 *
 * Notes:	The target map is kept alive across blocking VM calls via a
 *		task reference; if the target execs concurrently the data
 *		returned may be stale (see comments below).
 */
STATIC int
sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
                 size_t *sizep, proc_t cur_proc, int argc_yes)
{
	proc_t p;
	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
	int error = 0;
	struct _vm_map *proc_map;
	struct task * task;
	vm_map_copy_t tmp;
	user_addr_t arg_addr;
	size_t arg_size;
	caddr_t data;
	size_t argslen=0;
	int size;
	vm_offset_t copy_start, copy_end;
	kern_return_t ret;
	int pid;
	kauth_cred_t my_cred;
	uid_t uid;
	int argc = -1;

	if ( namelen < 1 )
		return(EINVAL);

	if (argc_yes)
		buflen -= sizeof(int);	/* reserve first word to return argc */

	/* we only care about buflen when where (oldp from sysctl) is not NULL. */
	/* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
	/* is not NULL then the caller wants us to return the length needed to */
	/* hold the data we would return */
	if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
		return(EINVAL);
	}
	arg_size = buflen;

	/*
	 * Lookup process by pid
	 */
	pid = name[0];
	p = proc_find(pid);
	if (p == NULL) {
		return(EINVAL);
	}

	/*
	 * Copy the top N bytes of the stack.
	 * On all machines we have so far, the stack grows
	 * downwards.
	 *
	 * If the user expects no more than N bytes of
	 * argument list, use that as a guess for the
	 * size.
	 */

	/* target never set up a user stack; nothing to copy */
	if (!p->user_stack) {
		proc_rele(p);
		return(EINVAL);
	}

	if (where == USER_ADDR_NULL) {
		/* caller only wants to know length of proc args data */
		if (sizep == NULL) {
			proc_rele(p);
			return(EFAULT);
		}

		size = p->p_argslen;
		proc_rele(p);
		if (argc_yes) {
			/* PROCARGS2: args area plus the leading argc word */
			size += sizeof(int);
		} else {
			/*
			 * old PROCARGS will return the executable's path and plus some
			 * extra space for work alignment and data tags
			 */
			size += PATH_MAX + (6 * sizeof(int));
		}
		/* round the reported size up to the next int boundary */
		size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
		*sizep = size;
		return (0);
	}

	my_cred = kauth_cred_proc_ref(p);
	uid = kauth_cred_getuid(my_cred);
	kauth_cred_unref(&my_cred);

	/* only the same effective uid, or the superuser, may read another
	 * process' argument area */
	if ((uid != kauth_cred_getuid(kauth_cred_get()))
		&& suser(kauth_cred_get(), &cur_proc->p_acflag)) {
		proc_rele(p);
		return (EINVAL);
	}

	if ((u_int)arg_size > p->p_argslen)
		arg_size = round_page(p->p_argslen);

	arg_addr = p->user_stack - arg_size;

	/*
	 * Before we can block (any VM code), make another
	 * reference to the map to keep it alive.  We do
	 * that by getting a reference on the task itself.
	 */
	task = p->task;
	if (task == NULL) {
		proc_rele(p);
		return(EINVAL);
	}

	/* save off argc before releasing the proc */
	argc = p->p_argc;

	argslen = p->p_argslen;
	/*
	 * Once we have a task reference we can convert that into a
	 * map reference, which we will use in the calls below.  The
	 * task/process may change its map after we take this reference
	 * (see execve), but the worst that will happen then is a return
	 * of stale info (which is always a possibility).
	 */
	task_reference(task);
	proc_rele(p);
	proc_map = get_task_map_reference(task);
	task_deallocate(task);

	if (proc_map == NULL)
		return(EINVAL);

	/* kernel-map bounce buffer for the pages copied from the target */
	ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size), VM_KERN_MEMORY_BSD);
	if (ret != KERN_SUCCESS) {
		vm_map_deallocate(proc_map);
		return(ENOMEM);
	}

	copy_end = round_page(copy_start + arg_size);

	if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
			  (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
			vm_map_deallocate(proc_map);
			kmem_free(kernel_map, copy_start,
					round_page(arg_size));
			return (EIO);
	}

	/*
	 * Now that we've done the copyin from the process'
	 * map, we can release the reference to it.
	 */
	vm_map_deallocate(proc_map);

	if( vm_map_copy_overwrite(kernel_map,
				  (vm_map_address_t)copy_start,
				  tmp, FALSE) != KERN_SUCCESS) {
			kmem_free(kernel_map, copy_start,
					round_page(arg_size));
			vm_map_copy_discard(tmp);
			return (EIO);
	}

	/* the args data sits at the end of the copied region */
	if (arg_size > argslen) {
		data = (caddr_t) (copy_end - argslen);
		size = argslen;
	} else {
		data = (caddr_t) (copy_end - arg_size);
		size = arg_size;
	}

	/*
	 * When these sysctls were introduced, the first string in the strings
	 * section was just the bare path of the executable.  However, for security
	 * reasons we now prefix this string with executable_path= so it can be
	 * parsed getenv style.  To avoid binary compatability issues with exising
	 * callers of this sysctl, we strip it off here if present.
	 * (rdar://problem/13746466)
	 */
#define EXECUTABLE_KEY "executable_path="
	if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0){
		data += strlen(EXECUTABLE_KEY);
		size -= strlen(EXECUTABLE_KEY);
	}

	if (argc_yes) {
		/* Put processes argc as the first word in the copyout buffer */
		suword(where, argc);
		error = copyout(data, (where + sizeof(int)), size);
		size += sizeof(int);
	} else {
		error = copyout(data, where, size);

		/*
		 * Make the old PROCARGS work to return the executable's path
		 * But, only if there is enough space in the provided buffer
		 *
		 * on entry: data [possibily] points to the beginning of the path
		 *
		 * Note: we keep all pointers&sizes aligned to word boundries
		 */
		if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
		{
			int binPath_sz, alignedBinPath_sz = 0;
			int extraSpaceNeeded, addThis;
			user_addr_t placeHere;
			char * str = (char *) data;
			int max_len = size;

			/* Some apps are really bad about messing up their stacks
			   So, we have to be extra careful about getting the length
			   of the executing binary.  If we encounter an error, we bail.
			 */

			/* Limit ourselves to PATH_MAX paths */
			if ( max_len > PATH_MAX ) max_len = PATH_MAX;

			binPath_sz = 0;

			while ( (binPath_sz < max_len-1) && (*str++ != 0) )
				binPath_sz++;

			/* If we have a NUL terminator, copy it, too */
			if (binPath_sz < max_len-1) binPath_sz += 1;

			/* Pre-Flight the space requiremnts */

			/* Account for the padding that fills out binPath to the next word */
			alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;

			placeHere = where + size;

			/* Account for the bytes needed to keep placeHere word aligned */
			addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;

			/* Add up all the space that is needed */
			extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));

			/* is there is room to tack on argv[0]? */
			if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
			{
				placeHere += addThis;
				suword(placeHere, 0);
				placeHere += sizeof(int);
				suword(placeHere, 0xBFFF0000);
				placeHere += sizeof(int);
				suword(placeHere, 0);
				placeHere += sizeof(int);
				error = copyout(data, placeHere, binPath_sz);
				if ( ! error )
				{
					placeHere += binPath_sz;
					suword(placeHere, 0);
					size += extraSpaceNeeded;
				}
			}
		}
	}

	if (copy_start != (vm_offset_t) 0) {
		kmem_free(kernel_map, copy_start, copy_end - copy_start);
	}
	if (error) {
		return(error);
	}

	if (where != USER_ADDR_NULL)
		*sizep = size;
	return (0);
}
1525
1526
1527 /*
1528 * Max number of concurrent aio requests
1529 */
1530 STATIC int
1531 sysctl_aiomax
1532 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1533 {
1534 int new_value, changed;
1535 int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
1536 if (changed) {
1537 /* make sure the system-wide limit is greater than the per process limit */
1538 if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS)
1539 aio_max_requests = new_value;
1540 else
1541 error = EINVAL;
1542 }
1543 return(error);
1544 }
1545
1546
1547 /*
1548 * Max number of concurrent aio requests per process
1549 */
1550 STATIC int
1551 sysctl_aioprocmax
1552 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1553 {
1554 int new_value, changed;
1555 int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
1556 if (changed) {
1557 /* make sure per process limit is less than the system-wide limit */
1558 if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
1559 aio_max_requests_per_process = new_value;
1560 else
1561 error = EINVAL;
1562 }
1563 return(error);
1564 }
1565
1566
1567 /*
1568 * Max number of async IO worker threads
1569 */
1570 STATIC int
1571 sysctl_aiothreads
1572 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1573 {
1574 int new_value, changed;
1575 int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
1576 if (changed) {
1577 /* we only allow an increase in the number of worker threads */
1578 if (new_value > aio_worker_threads ) {
1579 _aio_create_worker_threads((new_value - aio_worker_threads));
1580 aio_worker_threads = new_value;
1581 }
1582 else
1583 error = EINVAL;
1584 }
1585 return(error);
1586 }
1587
1588
1589 /*
1590 * System-wide limit on the max number of processes
1591 */
1592 STATIC int
1593 sysctl_maxproc
1594 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1595 {
1596 int new_value, changed;
1597 int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
1598 if (changed) {
1599 AUDIT_ARG(value32, new_value);
1600 /* make sure the system-wide limit is less than the configured hard
1601 limit set at kernel compilation */
1602 if (new_value <= hard_maxproc && new_value > 0)
1603 maxproc = new_value;
1604 else
1605 error = EINVAL;
1606 }
1607 return(error);
1608 }
1609
/* Static identification strings/values for the running kernel. */
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	(int *)NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	version, 0, "");
SYSCTL_STRING(_kern, OID_AUTO, uuid,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&kernel_uuid_string[0], 0, "");
#if DEBUG
#ifndef DKPR
#define DKPR 1
#endif
#endif

#if DKPR
/* Non-zero enables kprintf tracing of syscalls (DEBUG kernels only). */
int debug_kprint_syscall = 0;
/* If non-empty, restrict syscall tracing to processes with this p_comm. */
char debug_kprint_syscall_process[MAXCOMLEN+1];

/* Thread safe: bits and string value are not used to reclaim state */
SYSCTL_INT (_debug, OID_AUTO, kprint_syscall,
	CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
	CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
	"name of process for kprintf syscall tracing");

/*
 * Decide whether syscall kprintf tracing applies to the current process.
 * Returns 1 to trace, 0 otherwise.  When tracing is scoped to a single
 * process name, *namep is set to NULL (no value in repeating the name the
 * user asked for); otherwise *namep is set to the current p_comm.
 */
int debug_kprint_current_process(const char **namep)
{
	struct proc *p = current_proc();

	if (p == NULL) {
		return 0;
	}

	if (debug_kprint_syscall_process[0]) {
		/* user asked to scope tracing to a particular process name */
		if(0 == strncmp(debug_kprint_syscall_process,
				p->p_comm, sizeof(debug_kprint_syscall_process))) {
			/* no value in telling the user that we traced what they asked */
			if(namep) *namep = NULL;

			return 1;
		} else {
			return 0;
		}
	}

	/* trace all processes. Tell user what we traced */
	if (namep) {
		*namep = p->p_comm;
	}

	return 1;
}
#endif
1671
1672 /* PR-5293665: need to use a callback function for kern.osversion to set
1673 * osversion in IORegistry */
1674
1675 STATIC int
1676 sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1677 {
1678 int rval = 0;
1679
1680 rval = sysctl_handle_string(oidp, arg1, arg2, req);
1681
1682 if (req->newptr) {
1683 IORegistrySetOSBuildVersion((char *)arg1);
1684 }
1685
1686 return rval;
1687 }
1688
/* kern.osversion: read/write OS build version string (max 256 bytes) */
SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
	CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
	osversion, 256 /* OSVERSIZE*/,
	sysctl_osversion, "A", "");
1693
1694 static uint64_t osproductversion_string[48];
1695
1696 STATIC int
1697 sysctl_osproductversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1698 {
1699 if (req->newptr != 0) {
1700 /*
1701 * Can only ever be set by launchd, and only once at boot.
1702 */
1703 if (req->p->p_pid != 1 || osproductversion_string[0] != '\0') {
1704 return EPERM;
1705 }
1706 }
1707
1708 return sysctl_handle_string(oidp, arg1, arg2, req);
1709 }
1710
1711 SYSCTL_PROC(_kern, OID_AUTO, osproductversion,
1712 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
1713 osproductversion_string, sizeof(osproductversion_string),
1714 sysctl_osproductversion, "A", "The ProductVersion from SystemVersion.plist");
1715
/* Opaque OS-variant flag word, published once by launchd at boot. */
static uint64_t osvariant_status = 0;

/*
 * sysctl handler for kern.osvariant_status (64-bit quad).  Readable per
 * the node flags; writable only by launchd (pid 1) and only while still
 * zero, so the flags can be cached exactly once at boot.
 */
STATIC int
sysctl_osvariant_status(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
	if (req->newptr != 0) {
		/*
		 * Can only ever be set by launchd, and only once at boot.
		 */
		if (req->p->p_pid != 1 || osvariant_status != 0) {
			return EPERM;
		}
	}

	return sysctl_handle_quad(oidp, arg1, arg2, req);
}

SYSCTL_PROC(_kern, OID_AUTO, osvariant_status,
	CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
	&osvariant_status, sizeof(osvariant_status),
	sysctl_osvariant_status, "Q", "Opaque flags used to cache OS variant information");
1737
/*
 * sysctl handler for kern.bootargs: returns a copy of the boot argument
 * string obtained from the platform expert.  The stack buffer size tracks
 * the platform's BOOT_LINE_LENGTH (256 on embedded, 1024 otherwise).
 */
STATIC int
sysctl_sysctl_bootargs
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	/* BOOT_LINE_LENGTH */
#if CONFIG_EMBEDDED
	size_t boot_args_len = 256;
#else
	size_t boot_args_len = 1024;
#endif
	char buf[boot_args_len];	/* VLA with a compile-time-constant bound */

	strlcpy(buf, PE_boot_args(), boot_args_len);
	error = sysctl_io_string(req, buf, boot_args_len, 0, NULL);
	return(error);
}

SYSCTL_PROC(_kern, OID_AUTO, bootargs,
	CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
	NULL, 0,
	sysctl_sysctl_bootargs, "A", "bootargs");
1760
/*
 * Limits and counters under "kern": compile-time constants are published
 * by value (data pointer NULL, value in the arg2 slot); live counters by
 * pointer to the kernel global.
 */
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	(int *)NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	(int *)NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	(int *)NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	(int *)NULL, 1, "");
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	(int *)NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	NULL, 0, "");
#endif
SYSCTL_INT(_kern, OID_AUTO, num_files,
	CTLFLAG_RD | CTLFLAG_LOCKED,
	&nfiles, 0, "");
SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
	CTLFLAG_RD | CTLFLAG_LOCKED,
	&numvnodes, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_tasks,
	CTLFLAG_RD | CTLFLAG_LOCKED,
	&task_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_threads,
	CTLFLAG_RD | CTLFLAG_LOCKED,
	&thread_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
	CTLFLAG_RD | CTLFLAG_LOCKED,
	&task_threadmax, 0, "");
1800
/*
 * sysctl handler for kern.maxvnodes: desired number of vnodes.
 * sysctl_io_number() writes any new value directly into the global
 * desiredvnodes; if the value actually changed, the name cache is
 * resized to match.
 */
STATIC int
sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int oldval = desiredvnodes;
	int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);

	if (oldval != desiredvnodes) {
		resize_namecache(desiredvnodes);
	}

	return(error);
}
1813
/* Tunables with dedicated handlers declared above. */
SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
	CTLFLAG_RW | CTLFLAG_LOCKED,
	&nc_disabled, 0, "");

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0, sysctl_maxvnodes, "I", "");

SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0, sysctl_aiothreads, "I", "");
1837
#if (DEVELOPMENT || DEBUG)
/* Scheduler SMT-balancing knobs, exposed on development/debug kernels. */
extern int sched_smt_balance;
SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance,
	CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
	&sched_smt_balance, 0, "");
extern int sched_allow_rt_smt;
SYSCTL_INT(_kern, OID_AUTO, sched_allow_rt_smt,
	CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
	&sched_allow_rt_smt, 0, "");
#if __arm__ || __arm64__
extern uint32_t perfcontrol_requested_recommended_cores;
SYSCTL_UINT(_kern, OID_AUTO, sched_recommended_cores,
	CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
	&perfcontrol_requested_recommended_cores, 0, "");

/* Scheduler perfcontrol callouts sysctls */
SYSCTL_DECL(_kern_perfcontrol_callout);
SYSCTL_NODE(_kern, OID_AUTO, perfcontrol_callout, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
	"scheduler perfcontrol callouts");

extern int perfcontrol_callout_stats_enabled;
SYSCTL_INT(_kern_perfcontrol_callout, OID_AUTO, stats_enabled,
	CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
	&perfcontrol_callout_stats_enabled, 0, "");

extern uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
	perfcontrol_callout_stat_t stat);

/*
 * Common read-only handler for all perfcontrol callout stat sysctls below:
 * the stat kind is in arg1 and the callout type in arg2, and the averaged
 * value is fetched from perfcontrol_callout_stat_avg().
 */
STATIC int
sysctl_perfcontrol_callout_stat
(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
{
	perfcontrol_callout_stat_t stat = (perfcontrol_callout_stat_t)arg1;
	perfcontrol_callout_type_t type = (perfcontrol_callout_type_t)arg2;
	return sysctl_io_number(req, (int)perfcontrol_callout_stat_avg(type, stat),
		sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_instr,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_ON_CORE,
	sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_cycles,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_ON_CORE,
	sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_instr,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_OFF_CORE,
	sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_cycles,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_OFF_CORE,
	sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_instr,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_CONTEXT,
	sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_cycles,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_CONTEXT,
	sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_instr,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_STATE_UPDATE,
	sysctl_perfcontrol_callout_stat, "I", "");
SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_cycles,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_STATE_UPDATE,
	sysctl_perfcontrol_callout_stat, "I", "");

#endif /* __arm__ || __arm64__ */
#endif /* (DEVELOPMENT || DEBUG) */
1912
/*
 * kern.securelevel: read/set the BSD securelevel.
 *
 * A write that would LOWER the level is permitted only to pid 1
 * (init/launchd); any process may set an equal or higher value.
 * Otherwise EPERM.  The update itself is made under the proc list lock.
 */
STATIC int
sysctl_securelvl
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, changed;
	int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
	if (changed) {
		/* Allowed unless this is a lowering attempt by a non-init process. */
		if (!(new_value < securelevel && req->p->p_pid != 1)) {
			proc_list_lock();
			securelevel = new_value;
			proc_list_unlock();
		} else {
			error = EPERM;
		}
	}
	return(error);
}

SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0, sysctl_securelvl, "I", "");
1934
1935
1936 STATIC int
1937 sysctl_domainname
1938 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1939 {
1940 int error, changed;
1941 error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
1942 if (changed) {
1943 domainnamelen = strlen(domainname);
1944 }
1945 return(error);
1946 }
1947
1948 SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
1949 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
1950 0, 0, sysctl_domainname, "A", "");
1951
1952 SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
1953 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1954 &hostid, 0, "");
1955
1956 STATIC int
1957 sysctl_hostname
1958 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1959 {
1960 int error, changed;
1961 error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
1962 if (changed) {
1963 hostnamelen = req->newlen;
1964 }
1965 return(error);
1966 }
1967
1968
1969 SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
1970 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
1971 0, 0, sysctl_hostname, "A", "");
1972
1973 STATIC int
1974 sysctl_procname
1975 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1976 {
1977 /* Original code allowed writing, I'm copying this, although this all makes
1978 no sense to me. Besides, this sysctl is never used. */
1979 return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
1980 }
1981
1982 SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
1983 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
1984 0, 0, sysctl_procname, "A", "");
1985
/*
 * VM tunables historically published under the kern.* namespace; each OID
 * exposes the kernel variable of the same name directly.
 */
SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&speculative_reads_disabled, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&preheat_max_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&preheat_min_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&speculative_prefetch_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&speculative_prefetch_max_iosize, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&vm_page_free_target, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&vm_page_free_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&vm_page_free_reserved, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&vm_page_speculative_percentage, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&vm_page_speculative_q_age_ms, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&vm_max_delayed_work_limit, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&vm_max_batch, 0, "");

/* Printable UUID generated for this boot session. */
SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
	CTLFLAG_RD | CTLFLAG_LOCKED,
	&bootsessionuuid_string, sizeof(bootsessionuuid_string) , "");
2037
2038 STATIC int
2039 sysctl_boottime
2040 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2041 {
2042 struct timeval tv;
2043 boottime_timeval(&tv);
2044 struct proc *p = req->p;
2045
2046 if (proc_is64bit(p)) {
2047 struct user64_timeval t = {};
2048 t.tv_sec = tv.tv_sec;
2049 t.tv_usec = tv.tv_usec;
2050 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2051 } else {
2052 struct user32_timeval t = {};
2053 t.tv_sec = tv.tv_sec;
2054 t.tv_usec = tv.tv_usec;
2055 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
2056 }
2057 }
2058
2059 SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
2060 CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
2061 0, 0, sysctl_boottime, "S,timeval", "");
2062
2063 STATIC int
2064 sysctl_symfile
2065 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2066 {
2067 char *str;
2068 int error = get_kernel_symfile(req->p, &str);
2069 if (error)
2070 return (error);
2071 return sysctl_io_string(req, str, 0, 0, NULL);
2072 }
2073
2074
2075 SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
2076 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
2077 0, 0, sysctl_symfile, "A", "");
2078
#if NFSCLIENT
/*
 * kern.netboot: report whether the root filesystem is network-booted,
 * as returned by netboot_root().
 */
STATIC int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int netbooted = netboot_root();

	return sysctl_io_number(req, netbooted, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	0, 0, sysctl_netboot, "I", "");
#endif
2091
#ifdef CONFIG_IMGSRC_ACCESS
/*
 * Legacy--act as if only one layer of nesting is possible.
 *
 * kern.imgsrcdev: report the dev_t of the device backing the outermost
 * image-boot source volume.  Superuser-only; ENOENT when not image-booted.
 */
STATIC int
sysctl_imgsrcdev
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	vfs_context_t ctx = vfs_context_current();
	vnode_t devvp;
	int result;

	if (!vfs_context_issuser(ctx)) {
		return EPERM;
	}

	if (imgsrc_rootvnodes[0] == NULL) {
		return ENOENT;
	}

	/* Hold an iocount on the root vnode across the mnt_devvp access. */
	result = vnode_getwithref(imgsrc_rootvnodes[0]);
	if (result != 0) {
		return result;
	}

	/* Also take an iocount on the backing device vnode before using it. */
	devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
	result = vnode_getwithref(devvp);
	if (result != 0) {
		goto out;
	}

	result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);

	vnode_put(devvp);
out:
	/* Drop the root vnode iocount taken above. */
	vnode_put(imgsrc_rootvnodes[0]);
	return result;
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	0, 0, sysctl_imgsrcdev, "I", "");
2134
/*
 * kern.imgsrcinfo: return one imgsrc_info record per nesting level of the
 * image-boot root (up to MAX_IMAGEBOOT_NESTING).  ENXIO when not
 * image-booted.  Only the levels actually populated are copied out.
 */
STATIC int
sysctl_imgsrcinfo
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct imgsrc_info info[MAX_IMAGEBOOT_NESTING] = {};	/* 2 for now, no problem */
	uint32_t i;
	vnode_t rvp, devvp;

	if (imgsrc_rootvnodes[0] == NULLVP) {
		return ENXIO;
	}

	for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
		/*
		 * Go get the root vnode.
		 */
		rvp = imgsrc_rootvnodes[i];
		if (rvp == NULLVP) {
			break;
		}

		/* Take an iocount on this level's root vnode. */
		error = vnode_get(rvp);
		if (error != 0) {
			return error;
		}

		/*
		 * For now, no getting at a non-local volume.
		 */
		devvp = vnode_mount(rvp)->mnt_devvp;
		if (devvp == NULL) {
			vnode_put(rvp);
			return EINVAL;
		}

		error = vnode_getwithref(devvp);
		if (error != 0) {
			vnode_put(rvp);
			return error;
		}

		/*
		 * Fill in info.
		 */
		info[i].ii_dev = vnode_specrdev(devvp);
		info[i].ii_flags = 0;
		info[i].ii_height = i;
		bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));

		/* Drop both iocounts before moving to the next level. */
		vnode_put(devvp);
		vnode_put(rvp);
	}

	/* 'i' is the number of levels populated above. */
	return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
	CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
	0, 0, sysctl_imgsrcinfo, "I", "");

#endif /* CONFIG_IMGSRC_ACCESS */
2197
2198
/* kern.timer: container node for timer-coalescing/deadline controls. */
SYSCTL_DECL(_kern_timer);
SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");


SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&mach_timer_coalescing_enabled, 0, "");

SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
	CTLFLAG_RW | CTLFLAG_LOCKED,
	&timer_deadline_tracking_bin_1, "");
SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
	CTLFLAG_RW | CTLFLAG_LOCKED,
	&timer_deadline_tracking_bin_2, "");

/* kern.timer.longterm: controls/stats for the long-term timer queue. */
SYSCTL_DECL(_kern_timer_longterm);
SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");


/*
 * Selector values passed (via arg1) to sysctl_timer() below and decoded
 * by timer_sysctl_get()/timer_sysctl_set().
 * Must match definition in osfmk/kern/timer_call.c
 */
enum {
	THRESHOLD, QCOUNT,
	ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
	LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, SCAN_INTERVAL, PAUSES
};
extern uint64_t timer_sysctl_get(int);
extern int timer_sysctl_set(int, uint64_t);
2226
2227 STATIC int
2228 sysctl_timer
2229 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2230 {
2231 int oid = (int)arg1;
2232 uint64_t value = timer_sysctl_get(oid);
2233 uint64_t new_value;
2234 int error;
2235 int changed;
2236
2237 error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
2238 if (changed)
2239 error = timer_sysctl_set(oid, new_value);
2240
2241 return error;
2242 }
2243
/* Writable thresholds for the long-term timer queue. */
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
	CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
	(void *) THRESHOLD, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_limit,
	CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
	(void *) SCAN_LIMIT, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_interval,
	CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
	(void *) SCAN_INTERVAL, 0, sysctl_timer, "Q", "");

/* Read-only counters. */
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *) QCOUNT, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_pauses,
	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *) PAUSES, 0, sysctl_timer, "Q", "");

#if DEBUG
/* Detailed instrumentation, DEBUG kernels only. */
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *) ENQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *) DEQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *) ESCALATES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *) SCANS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *) PREEMPTS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *) LATENCY, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
	(void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
#endif /* DEBUG */
2287
2288 STATIC int
2289 sysctl_usrstack
2290 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2291 {
2292 return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
2293 }
2294
2295 SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
2296 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2297 0, 0, sysctl_usrstack, "I", "");
2298
/*
 * kern.usrstack64: report the requesting process's user stack address at
 * full width (compare the truncating 32-bit variant above).
 */
STATIC int
sysctl_usrstack64
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
	0, 0, sysctl_usrstack64, "Q", "");
2309
2310 #if CONFIG_COREDUMP
2311
2312 SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
2313 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2314 corefilename, sizeof(corefilename), "");
2315
2316 STATIC int
2317 sysctl_coredump
2318 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2319 {
2320 #ifdef SECURE_KERNEL
2321 (void)req;
2322 return (ENOTSUP);
2323 #else
2324 int new_value, changed;
2325 int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
2326 if (changed) {
2327 if ((new_value == 0) || (new_value == 1))
2328 do_coredump = new_value;
2329 else
2330 error = EINVAL;
2331 }
2332 return(error);
2333 #endif
2334 }
2335
2336 SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
2337 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2338 0, 0, sysctl_coredump, "I", "");
2339
2340 STATIC int
2341 sysctl_suid_coredump
2342 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2343 {
2344 #ifdef SECURE_KERNEL
2345 (void)req;
2346 return (ENOTSUP);
2347 #else
2348 int new_value, changed;
2349 int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
2350 if (changed) {
2351 if ((new_value == 0) || (new_value == 1))
2352 sugid_coredump = new_value;
2353 else
2354 error = EINVAL;
2355 }
2356 return(error);
2357 #endif
2358 }
2359
2360 SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
2361 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2362 0, 0, sysctl_suid_coredump, "I", "");
2363
2364 #endif /* CONFIG_COREDUMP */
2365
2366 STATIC int
2367 sysctl_delayterm
2368 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2369 {
2370 struct proc *p = req->p;
2371 int new_value, changed;
2372 int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
2373 if (changed) {
2374 proc_lock(p);
2375 if (new_value)
2376 req->p->p_lflag |= P_LDELAYTERM;
2377 else
2378 req->p->p_lflag &= ~P_LDELAYTERM;
2379 proc_unlock(p);
2380 }
2381 return(error);
2382 }
2383
2384 SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
2385 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2386 0, 0, sysctl_delayterm, "I", "");
2387
2388
2389 STATIC int
2390 sysctl_rage_vnode
2391 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2392 {
2393 struct proc *p = req->p;
2394 struct uthread *ut;
2395 int new_value, old_value, changed;
2396 int error;
2397
2398 ut = get_bsdthread_info(current_thread());
2399
2400 if (ut->uu_flag & UT_RAGE_VNODES)
2401 old_value = KERN_RAGE_THREAD;
2402 else if (p->p_lflag & P_LRAGE_VNODES)
2403 old_value = KERN_RAGE_PROC;
2404 else
2405 old_value = 0;
2406
2407 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2408
2409 if (error == 0) {
2410 switch (new_value) {
2411 case KERN_RAGE_PROC:
2412 proc_lock(p);
2413 p->p_lflag |= P_LRAGE_VNODES;
2414 proc_unlock(p);
2415 break;
2416 case KERN_UNRAGE_PROC:
2417 proc_lock(p);
2418 p->p_lflag &= ~P_LRAGE_VNODES;
2419 proc_unlock(p);
2420 break;
2421
2422 case KERN_RAGE_THREAD:
2423 ut->uu_flag |= UT_RAGE_VNODES;
2424 break;
2425 case KERN_UNRAGE_THREAD:
2426 ut = get_bsdthread_info(current_thread());
2427 ut->uu_flag &= ~UT_RAGE_VNODES;
2428 break;
2429 }
2430 }
2431 return(error);
2432 }
2433
2434 SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
2435 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2436 0, 0, sysctl_rage_vnode, "I", "");
2437
/* XXX move this interface into libproc and remove this sysctl */
/*
 * kern.setthread_cpupercent (write-only): apply a CPU usage limit to the
 * calling thread via thread_set_cpulimit().  The written int packs:
 *   bits  0..7  - percentage (0..100); 0 removes any existing limit
 *   bits  8..31 - refill period in milliseconds
 * Returns EINVAL for percent > 100, EIO if the limit cannot be applied.
 */
STATIC int
sysctl_setthread_cpupercent
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, old_value;
	int error = 0;
	kern_return_t kret = KERN_SUCCESS;
	uint8_t percent = 0;
	int ms_refill = 0;

	/* Write-only control: a plain read is a no-op. */
	if (!req->newptr)
		return (0);

	old_value = 0;

	if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0)
		return (error);

	percent = new_value & 0xff;			/* low 8 bits: percentage */
	ms_refill = (new_value >> 8) & 0xffffff;	/* upper 24 bits: refill period (ms) */
	if (percent > 100)
		return (EINVAL);

	/*
	 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
	 */
	if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0)
		return (EIO);

	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
	CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
	0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
2474
2475
2476 STATIC int
2477 sysctl_kern_check_openevt
2478 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2479 {
2480 struct proc *p = req->p;
2481 int new_value, old_value, changed;
2482 int error;
2483
2484 if (p->p_flag & P_CHECKOPENEVT) {
2485 old_value = KERN_OPENEVT_PROC;
2486 } else {
2487 old_value = 0;
2488 }
2489
2490 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2491
2492 if (error == 0) {
2493 switch (new_value) {
2494 case KERN_OPENEVT_PROC:
2495 OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
2496 break;
2497
2498 case KERN_UNOPENEVT_PROC:
2499 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
2500 break;
2501
2502 default:
2503 error = EINVAL;
2504 }
2505 }
2506 return(error);
2507 }
2508
2509 SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2510 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2511
2512
2513
/*
 * kern.nx: report/toggle the no-execute (NX/XD) page-protection policy.
 * Always ENOTSUP on SECURE_KERNEL builds; on x86 a write is refused
 * unless the CPU advertises the XD feature.
 */
STATIC int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	(void)req;
	return ENOTSUP;
#else
	int new_value, changed;
	int error;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
	if (error)
		return error;

	if (changed) {
#if defined(__i386__) || defined(__x86_64__)
		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
			return ENOTSUP;
#endif
		nx_enabled = new_value;
	}
	return(error);
#endif /* SECURE_KERNEL */
}



SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	0, 0, sysctl_nx, "I", "");
2548
2549 STATIC int
2550 sysctl_loadavg
2551 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2552 {
2553 if (proc_is64bit(req->p)) {
2554 struct user64_loadavg loadinfo64 = {};
2555 fill_loadavg64(&averunnable, &loadinfo64);
2556 return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
2557 } else {
2558 struct user32_loadavg loadinfo32 = {};
2559 fill_loadavg32(&averunnable, &loadinfo32);
2560 return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
2561 }
2562 }
2563
2564 SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
2565 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2566 0, 0, sysctl_loadavg, "S,loadavg", "");
2567
2568 /*
2569 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2570 */
2571 STATIC int
2572 sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
2573 __unused int arg2, struct sysctl_req *req)
2574 {
2575 int old_value=0, new_value=0, error=0;
2576
2577 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value ))
2578 return(error);
2579 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
2580 if (!error) {
2581 return (vm_toggle_entry_reuse(new_value, NULL));
2582 }
2583 return(error);
2584 }
2585
2586 SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse,"I","");
2587
2588
2589 STATIC int
2590 sysctl_swapusage
2591 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2592 {
2593 int error;
2594 uint64_t swap_total;
2595 uint64_t swap_avail;
2596 vm_size_t swap_pagesize;
2597 boolean_t swap_encrypted;
2598 struct xsw_usage xsu = {};
2599
2600 error = macx_swapinfo(&swap_total,
2601 &swap_avail,
2602 &swap_pagesize,
2603 &swap_encrypted);
2604 if (error)
2605 return error;
2606
2607 xsu.xsu_total = swap_total;
2608 xsu.xsu_avail = swap_avail;
2609 xsu.xsu_used = swap_total - swap_avail;
2610 xsu.xsu_pagesize = swap_pagesize;
2611 xsu.xsu_encrypted = swap_encrypted;
2612 return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
2613 }
2614
2615
2616
2617 SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
2618 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2619 0, 0, sysctl_swapusage, "S,xsw_usage", "");
2620
#if CONFIG_FREEZE
extern void vm_page_reactivate_all_throttled(void);

/*
 * vm.freeze_enabled: enable/disable the memorystatus freezer.
 *
 * NOTE(review): the guard below rejects any write while
 * VM_CONFIG_COMPRESSOR_IS_ACTIVE — i.e. the toggle is only honored when
 * the compressor is not active.  Confirm that is the intended condition.
 */
static int
sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = memorystatus_freeze_enabled ? 1 : 0;
	boolean_t disabled;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		printf("Failed attempt to set vm.freeze_enabled sysctl\n");
		return EINVAL;
	}

	/*
	 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
	 */
	disabled = (!val && memorystatus_freeze_enabled);

	memorystatus_freeze_enabled = val ? TRUE : FALSE;

	if (disabled) {
		vm_page_reactivate_all_throttled();
	}

	return (0);
}

SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
#endif /* CONFIG_FREEZE */
2657
/*
 * this kernel does NOT implement shared_region_make_private_np();
 * kern.shreg_private therefore reads through a NULL pointer argument,
 * advertising the feature's absence.
 */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
	CTLFLAG_RD | CTLFLAG_LOCKED,
	(int *)NULL, 0, "");
2662
2663 STATIC int
2664 fetch_process_cputype(
2665 proc_t cur_proc,
2666 int *name,
2667 u_int namelen,
2668 cpu_type_t *cputype)
2669 {
2670 proc_t p = PROC_NULL;
2671 int refheld = 0;
2672 cpu_type_t ret = 0;
2673 int error = 0;
2674
2675 if (namelen == 0)
2676 p = cur_proc;
2677 else if (namelen == 1) {
2678 p = proc_find(name[0]);
2679 if (p == NULL)
2680 return (EINVAL);
2681 refheld = 1;
2682 } else {
2683 error = EINVAL;
2684 goto out;
2685 }
2686
2687 ret = cpu_type() & ~CPU_ARCH_MASK;
2688 if (IS_64BIT_PROCESS(p))
2689 ret |= CPU_ARCH_ABI64;
2690
2691 *cputype = ret;
2692
2693 if (refheld != 0)
2694 proc_rele(p);
2695 out:
2696 return (error);
2697 }
2698
2699 STATIC int
2700 sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2701 struct sysctl_req *req)
2702 {
2703 int error;
2704 cpu_type_t proc_cputype = 0;
2705 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2706 return error;
2707 int res = 1;
2708 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
2709 res = 0;
2710 return SYSCTL_OUT(req, &res, sizeof(res));
2711 }
2712 SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native ,"I","proc_native");
2713
2714 STATIC int
2715 sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2716 struct sysctl_req *req)
2717 {
2718 int error;
2719 cpu_type_t proc_cputype = 0;
2720 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2721 return error;
2722 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
2723 }
2724 SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
2725
2726 STATIC int
2727 sysctl_safeboot
2728 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2729 {
2730 return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
2731 }
2732
2733 SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
2734 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2735 0, 0, sysctl_safeboot, "I", "");
2736
2737 STATIC int
2738 sysctl_singleuser
2739 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2740 {
2741 return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
2742 }
2743
2744 SYSCTL_PROC(_kern, OID_AUTO, singleuser,
2745 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2746 0, 0, sysctl_singleuser, "I", "");
2747
2748 STATIC int sysctl_minimalboot
2749 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2750 {
2751 return sysctl_io_number(req, minimalboot, sizeof(int), NULL, NULL);
2752 }
2753
2754 SYSCTL_PROC(_kern, OID_AUTO, minimalboot,
2755 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2756 0, 0, sysctl_minimalboot, "I", "");
2757
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t affinity_sets_enabled;
extern int affinity_sets_mapping;

/*
 * NOTE(review): exposing &affinity_sets_enabled (boolean_t *) through an
 * int * cast assumes boolean_t is int-sized on all targets — confirm.
 */
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
	CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
	CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
2768
2769 /*
2770 * Boolean indicating if KASLR is active.
2771 */
2772 STATIC int
2773 sysctl_slide
2774 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2775 {
2776 uint32_t slide;
2777
2778 slide = vm_kernel_slide ? 1 : 0;
2779
2780 return sysctl_io_number( req, slide, sizeof(int), NULL, NULL);
2781 }
2782
2783 SYSCTL_PROC(_kern, OID_AUTO, slide,
2784 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2785 0, 0, sysctl_slide, "I", "");
2786
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
 * kmem_init().
 *
 * All values are in bytes.
 */

vm_map_size_t	vm_global_no_user_wire_amount;
vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

/*
 * There needs to be a more automatic/elegant way to do this
 */
#if defined(__ARM__)
/*
 * NOTE(review): the 32-bit path publishes vm_map_size_t objects through
 * SYSCTL_INT, assuming the type is 32-bit on __ARM__ — confirm; the
 * 64-bit path below uses SYSCTL_QUAD.
 */
SYSCTL_INT(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, 0, "");
SYSCTL_INT(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, 0, "");
SYSCTL_INT(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, 0, "");
#else
SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
#endif
2816
/* Read-only counters from the vm_map copy-overwrite fast path. */
extern int vm_map_copy_overwrite_aligned_src_not_internal;
extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
extern int vm_map_copy_overwrite_aligned_src_large;
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");


/* External page count (read-only) and file-cache floor (tunable). */
extern uint32_t	vm_page_external_count;
extern uint32_t	vm_page_filecache_min;

SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_filecache_min, 0, "");
2830
2831 extern int vm_compressor_mode;
2832 extern int vm_compressor_is_active;
2833 extern int vm_compressor_available;
2834 extern uint32_t vm_ripe_target_age;
2835 extern uint32_t swapout_target_age;
2836 extern int64_t compressor_bytes_used;
2837 extern int64_t c_segment_input_bytes;
2838 extern int64_t c_segment_compressed_bytes;
2839 extern uint32_t compressor_eval_period_in_msecs;
2840 extern uint32_t compressor_sample_min_in_msecs;
2841 extern uint32_t compressor_sample_max_in_msecs;
2842 extern uint32_t compressor_thrashing_threshold_per_10msecs;
2843 extern uint32_t compressor_thrashing_min_per_10msecs;
2844 extern uint32_t vm_compressor_minorcompact_threshold_divisor;
2845 extern uint32_t vm_compressor_majorcompact_threshold_divisor;
2846 extern uint32_t vm_compressor_unthrottle_threshold_divisor;
2847 extern uint32_t vm_compressor_catchup_threshold_divisor;
2848 extern uint32_t vm_compressor_time_thread;
2849 #if DEVELOPMENT || DEBUG
2850 extern vmct_stats_t vmct_stats;
2851 #endif
2852
/* Cumulative compressor throughput: bytes fed in, bytes after compression, pool bytes in use. */
SYSCTL_QUAD(_vm, OID_AUTO, compressor_input_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_input_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");

/* Read-only compressor configuration and state. */
SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_available, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_available, 0, "");

SYSCTL_INT(_vm, OID_AUTO, vm_ripe_target_age_in_secs, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ripe_target_age, 0, "");

/* Writable tunables for the compressor's thrashing detector. */
SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_minorcompact_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_majorcompact_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_unthrottle_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_catchup_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_catchup_threshold_divisor, 0, "");

/* Swap-file name prefix; the reserved tail leaves room for the per-file index suffix. */
SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_timing_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_time_thread, 0, "");
2877
#if DEVELOPMENT || DEBUG
/* Per-compressor-thread statistics (two threads, index 0 and 1); debug kernels only. */
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[1], "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_threads_total, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_cthreads_total, "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[1], "");

SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[0], "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[1], "");

/* Min/max pages handled in a single pass, per thread. */
SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[0], 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[1], 0, "");

SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[0], 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[1], 0, "");

#endif
2897
/* Per-codec compression statistics (read-only, cumulative). */
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_delta, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_negative_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_negative_delta, "");

SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressed_bytes, "");

SYSCTL_QUAD(_vm, OID_AUTO, uc_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.uc_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions, "");

/* *_catime/*_datime expose the *_cabstime/*_dabstime fields — presumably
 * cumulative (de)compression time in Mach absolute time units; confirm in the VM layer. */
SYSCTL_QUAD(_vm, OID_AUTO, wk_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_cabstime, "");

SYSCTL_QUAD(_vm, OID_AUTO, wkh_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_cabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wkh_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_cabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions_exclusive, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_mzv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_mzv_compressions, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_exclusive, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_total, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_total, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compression_failures, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_sv_compressions, "");


SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_dabstime, "");

SYSCTL_QUAD(_vm, OID_AUTO, wkh_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_dabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wkh_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wks_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_dabstime, "");
SYSCTL_QUAD(_vm, OID_AUTO, wks_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_decompressions, "");

SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_decompressions, "");

/* Writable codec-selection heuristics (vmctune). */
SYSCTL_INT(_vm, OID_AUTO, lz4_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, wkdm_reeval_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.wkdm_reeval_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_skips, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_skips, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_run_length, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_run_length, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_max_preselects, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_preselects, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_run_preselection_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_preselection_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_run_continue_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_continue_bytes, 0, "");
SYSCTL_INT(_vm, OID_AUTO, lz4_profitable_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_profitable_bytes, 0, "");
#if DEVELOPMENT || DEBUG
/* Debug-only knobs: force a codec, write-protect test, software-WKdm override. */
extern int vm_compressor_current_codec;
extern int vm_compressor_test_seg_wp;
extern boolean_t vm_compressor_force_sw_wkdm;
SYSCTL_INT(_vm, OID_AUTO, compressor_codec, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_current_codec, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_test_wp, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_test_seg_wp, 0, "");

SYSCTL_INT(_vm, OID_AUTO, wksw_force, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_force_sw_wkdm, 0, "");
extern int precompy, wkswhw;

SYSCTL_INT(_vm, OID_AUTO, precompy, CTLFLAG_RW | CTLFLAG_LOCKED, &precompy, 0, "");
SYSCTL_INT(_vm, OID_AUTO, wkswhw, CTLFLAG_RW | CTLFLAG_LOCKED, &wkswhw, 0, "");
extern unsigned int vm_ktrace_enabled;
SYSCTL_INT(_vm, OID_AUTO, vm_ktrace, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ktrace_enabled, 0, "");
#endif
2967
#if CONFIG_PHANTOM_CACHE
/* Phantom-cache thrashing-detection tunables (separate threshold for SSD-backed systems). */
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
extern uint32_t phantom_cache_thrashing_threshold_ssd;


SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
#endif
2978
#if CONFIG_BACKGROUND_QUEUE

/* Background page-queue state: mode/target/exclusion are writable, counts are read-only. */
extern uint32_t vm_page_background_count;
extern uint32_t vm_page_background_target;
extern uint32_t vm_page_background_internal_count;
extern uint32_t vm_page_background_external_count;
extern uint32_t vm_page_background_mode;
extern uint32_t vm_page_background_exclude_external;
extern uint64_t vm_page_background_promoted_count;
extern uint64_t vm_pageout_considered_bq_internal;
extern uint64_t vm_pageout_considered_bq_external;
extern uint64_t vm_pageout_rejected_bq_internal;
extern uint64_t vm_pageout_rejected_bq_external;

SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");

SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_external, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");

#endif
3007
#if (DEVELOPMENT || DEBUG)

/* Counts of hard/soft page-creation throttling events; debug kernels only. */
SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&vm_page_creation_throttled_hard, 0, "");

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&vm_page_creation_throttled_soft, 0, "");

extern uint32_t vm_pageout_memorystatus_fb_factor_nr;
extern uint32_t vm_pageout_memorystatus_fb_factor_dr;
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_nr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_nr, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_dr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_dr, 0, "");

extern uint32_t vm_grab_anon_overrides;
extern uint32_t vm_grab_anon_nops;

SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_overrides, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_nops, 0, "");

/* log message counters for persistence mode */
extern uint32_t oslog_p_total_msgcount;
extern uint32_t oslog_p_metadata_saved_msgcount;
extern uint32_t oslog_p_metadata_dropped_msgcount;
extern uint32_t oslog_p_error_count;
extern uint32_t oslog_p_saved_msgcount;
extern uint32_t oslog_p_dropped_msgcount;
extern uint32_t oslog_p_boot_dropped_msgcount;

/* log message counters for streaming mode */
extern uint32_t oslog_s_total_msgcount;
extern uint32_t oslog_s_metadata_msgcount;
extern uint32_t oslog_s_error_count;
extern uint32_t oslog_s_streamed_msgcount;
extern uint32_t oslog_s_dropped_msgcount;

/* CTLFLAG_ANYBODY: readable without privilege so logging tools can self-diagnose drops. */
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_boot_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_boot_dropped_msgcount, 0, "");

SYSCTL_UINT(_debug, OID_AUTO, oslog_s_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_metadata_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_metadata_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_streamed_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_streamed_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_dropped_msgcount, 0, "");


#endif /* DEVELOPMENT || DEBUG */
3061
/*
 * Enable tracing of voucher contents
 */
extern uint32_t ipc_voucher_trace_contents;

SYSCTL_INT (_kern, OID_AUTO, ipc_voucher_trace_contents,
	CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");

/*
 * Kernel stack size and depth
 */
/* NOTE(review): the backing variables are cast to (int *); presumably they are
 * unsigned and int-sized — confirm against their definitions in osfmk. */
SYSCTL_INT (_kern, OID_AUTO, stack_size,
	CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
	CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");

extern unsigned int kern_feature_overrides;
SYSCTL_INT (_kern, OID_AUTO, kern_feature_overrides,
	CTLFLAG_RD | CTLFLAG_LOCKED, &kern_feature_overrides, 0, "Kernel feature override mask");

/*
 * enable back trace for port allocations
 */
extern int ipc_portbt;

SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	&ipc_portbt, 0, "");

/*
 * Scheduler sysctls
 */

SYSCTL_STRING(_kern, OID_AUTO, sched,
	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	sched_string, sizeof(sched_string),
	"Timeshare scheduler implementation");
3099
/*
 * Only support runtime modification on embedded platforms
 * with development config enabled
 */
#if CONFIG_EMBEDDED
#if !SECURE_KERNEL
/* Toggles precise user-vs-kernel CPU time accounting; compiled out on secure kernels. */
extern int precise_user_kernel_time;
SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time,
	CTLFLAG_RW | CTLFLAG_LOCKED,
	&precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time");
#endif
#endif
3112
3113
/* Parameters related to timer coalescing tuning, to be replaced
 * with a dedicated system call in the future.
 */
/* Coalescing tuning parameters for various thread/task attributes follow.
 * (A stale fragment here once read "Enable processing pending timers in the
 * context of any other interrupt"; that behavior is not controlled below.) */
3119 STATIC int
3120 sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
3121 {
3122 #pragma unused(oidp)
3123 int size = arg2; /* subcommand*/
3124 int error;
3125 int changed = 0;
3126 uint64_t old_value_ns;
3127 uint64_t new_value_ns;
3128 uint64_t value_abstime;
3129 if (size == sizeof(uint32_t))
3130 value_abstime = *((uint32_t *)arg1);
3131 else if (size == sizeof(uint64_t))
3132 value_abstime = *((uint64_t *)arg1);
3133 else return ENOTSUP;
3134
3135 absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
3136 error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
3137 if ((error) || (!changed))
3138 return error;
3139
3140 nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
3141 if (size == sizeof(uint32_t))
3142 *((uint32_t *)arg1) = (uint32_t)value_abstime;
3143 else
3144 *((uint64_t *)arg1) = value_abstime;
3145 return error;
3146 }
3147
/*
 * Timer-coalescing tuning knobs.  The *_scale entries are plain integer
 * shift values; the *_ns_max entries are stored in Mach absolute time but
 * exposed in nanoseconds via sysctl_timer_user_us_kernel_abstime (arg2
 * carries the byte width of the backing field).
 */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_resort_threshold_abstime,
	sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");
SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_bg_abstime_max,
	sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_kt_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_kt_abstime_max,
	sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_fp_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_fp_abstime_max,
	sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_ts_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.timer_coalesce_ts_abstime_max,
	sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

/* Per-latency-QoS-tier scale/max pairs, tiers 0 through 5. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[0], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[0],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[1], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[1],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[2], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[2],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[3], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[3],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[4], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[4],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_scale[5], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
	CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcoal_prio_params.latency_qos_abstime_max[5],
	sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
	sysctl_timer_user_us_kernel_abstime,
	"Q", "");
3262
3263 /* Communicate the "user idle level" heuristic to the timer layer, and
3264 * potentially other layers in the future.
3265 */
3266
3267 static int
3268 timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
3269 int new_value = 0, old_value = 0, changed = 0, error;
3270
3271 old_value = timer_get_user_idle_level();
3272
3273 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
3274
3275 if (error == 0 && changed) {
3276 if (timer_set_user_idle_level(new_value) != KERN_SUCCESS)
3277 error = ERANGE;
3278 }
3279
3280 return error;
3281 }
3282
/* machdep.user_idle_level: backed by the handler above; no arg1/arg2 needed. */
SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0,
	timer_user_idle_level, "I", "User idle level heuristic, 0-128");

#if HYPERVISOR
/* kern.hv_support: whether Hypervisor.framework support is available (read-only). */
SYSCTL_INT(_kern, OID_AUTO, hv_support,
	CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
	&hv_support_available, 0, "");
#endif
3293
#if CONFIG_EMBEDDED
/*
 * kern.darkboot handler.  Reads return the current 'darkboot' flag.
 * Writes — gated by the PRIV_DARKBOOT privilege — accept one of the
 * MEMORY_MAINTENANCE_DARK_BOOT_* request codes: UNSET clears the flag
 * (and removes the NVRAM variable if present), SET sets it in memory
 * only, SET_PERSISTENT sets it and records it in NVRAM.
 */
STATIC int
sysctl_darkboot SYSCTL_HANDLER_ARGS
{
	int err = 0, value = 0;
#pragma unused(oidp, arg1, arg2, err, value, req)

	/*
	 * Handle the sysctl request.
	 *
	 * If this is a read, the function will set the value to the current darkboot value. Otherwise,
	 * we'll get the request identifier into "value" and then we can honor it.
	 */
	if ((err = sysctl_io_number(req, darkboot, sizeof(int), &value, NULL)) != 0) {
		goto exit;
	}

	/* writing requested, let's process the request */
	if (req->newptr) {
		/* writing is protected by an entitlement */
		if (priv_check_cred(kauth_cred_get(), PRIV_DARKBOOT, 0) != 0) {
			err = EPERM;
			goto exit;
		}

		switch (value) {
		case MEMORY_MAINTENANCE_DARK_BOOT_UNSET:
			/*
			 * If the darkboot sysctl is unset, the NVRAM variable
			 * must be unset too. If that's not the case, it means
			 * someone is doing something crazy and not supported.
			 */
			if (darkboot != 0) {
				/* PERemoveNVRAMProperty returns non-zero on success here. */
				int ret = PERemoveNVRAMProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME);
				if (ret) {
					darkboot = 0;
				} else {
					err = EINVAL;
				}
			}
			break;
		case MEMORY_MAINTENANCE_DARK_BOOT_SET:
			darkboot = 1;
			break;
		case MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT: {
			/*
			 * Set the NVRAM and update 'darkboot' in case
			 * of success. Otherwise, do not update
			 * 'darkboot' and report the failure.
			 */
			if (PEWriteNVRAMBooleanProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME, TRUE)) {
				darkboot = 1;
			} else {
				err = EINVAL;
			}

			break;
		}
		default:
			err = EINVAL;
		}
	}

exit:
	return err;
}

/* CTLFLAG_ANYBODY: anyone may issue the request; writes are still gated by PRIV_DARKBOOT above. */
SYSCTL_PROC(_kern, OID_AUTO, darkboot,
	CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
	0, 0, sysctl_darkboot, "I", "");
#endif
3365
#if DEVELOPMENT || DEBUG
#include <sys/sysent.h>
/* This should result in a fatal exception, verifying that "sysent" is
 * write-protected.
 */
static int
kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
	uint64_t new_value = 0, old_value = 0;
	int changed = 0, error;

	/* Any successful write of a value triggers the store into the syscall table. */
	error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
	if ((error == 0) && changed) {
		/* volatile so the store cannot be optimized away; expected to fault,
		 * so the printf below should never be reached on a correct kernel. */
		volatile uint32_t *wraddr = (uint32_t *) &sysent[0];
		*wraddr = 0;
		printf("sysent[0] write succeeded\n");
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, sysent_const_check,
	CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
	0, 0,
	kern_sysent_write, "I", "Attempt sysent[0] write");

#endif
3391
/* kern.development: 1 on DEVELOPMENT/DEBUG kernels, 0 on RELEASE (compat OID, hidden from listing). */
#if DEVELOPMENT || DEBUG
SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 1, "");
#else
SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 0, "");
#endif
3397
3398
3399 #if DEVELOPMENT || DEBUG
3400
3401 static int
3402 sysctl_panic_test SYSCTL_HANDLER_ARGS
3403 {
3404 #pragma unused(arg1, arg2)
3405 int rval = 0;
3406 char str[32] = "entry prelog postlog postcore";
3407
3408 rval = sysctl_handle_string(oidp, str, sizeof(str), req);
3409
3410 if (rval == 0 && req->newptr) {
3411 if (strncmp("entry", str, strlen("entry")) == 0) {
3412 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_ENTRY, "test recursive panic at entry");
3413 } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
3414 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_PRELOG, "test recursive panic prior to writing a paniclog");
3415 } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
3416 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTLOG, "test recursive panic subsequent to paniclog");
3417 } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
3418 panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTCORE, "test recursive panic subsequent to on-device core");
3419 }
3420 }
3421
3422 return rval;
3423 }
3424
3425 static int
3426 sysctl_debugger_test SYSCTL_HANDLER_ARGS
3427 {
3428 #pragma unused(arg1, arg2)
3429 int rval = 0;
3430 char str[32] = "entry prelog postlog postcore";
3431
3432 rval = sysctl_handle_string(oidp, str, sizeof(str), req);
3433
3434 if (rval == 0 && req->newptr) {
3435 if (strncmp("entry", str, strlen("entry")) == 0) {
3436 DebuggerWithContext(0, NULL, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY);
3437 } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
3438 DebuggerWithContext(0, NULL, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG);
3439 } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
3440 DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG);
3441 } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
3442 DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE);
3443 }
3444 }
3445
3446 return rval;
3447 }
3448
/* Spinlock shared between the test sysctl below and its helper thread. */
decl_lck_spin_data(, spinlock_panic_test_lock)

/* Helper thread: grab the test spinlock and hold it forever, so the sysctl
 * thread's later acquisition attempt spins until the spinlock-timeout
 * panic fires. */
__attribute__((noreturn))
static void
spinlock_panic_test_acquire_spinlock(void * arg __unused, wait_result_t wres __unused)
{
	lck_spin_lock(&spinlock_panic_test_lock);
	while (1) { ; }
}
3458
3459 static int
3460 sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS
3461 {
3462 #pragma unused(oidp, arg1, arg2)
3463 if (req->newlen == 0)
3464 return EINVAL;
3465
3466 thread_t panic_spinlock_thread;
3467 /* Initialize panic spinlock */
3468 lck_grp_t * panic_spinlock_grp;
3469 lck_grp_attr_t * panic_spinlock_grp_attr;
3470 lck_attr_t * panic_spinlock_attr;
3471
3472 panic_spinlock_grp_attr = lck_grp_attr_alloc_init();
3473 panic_spinlock_grp = lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr);
3474 panic_spinlock_attr = lck_attr_alloc_init();
3475
3476 lck_spin_init(&spinlock_panic_test_lock, panic_spinlock_grp, panic_spinlock_attr);
3477
3478
3479 /* Create thread to acquire spinlock */
3480 if (kernel_thread_start(spinlock_panic_test_acquire_spinlock, NULL, &panic_spinlock_thread) != KERN_SUCCESS) {
3481 return EBUSY;
3482 }
3483
3484 /* Try to acquire spinlock -- should panic eventually */
3485 lck_spin_lock(&spinlock_panic_test_lock);
3486 while(1) { ; }
3487 }
3488
/* Worker for debug.simultaneous_panic_test: busy-wait on the shared atomic
 * flag so all workers are released at once, then panic — producing
 * near-simultaneous panics on many CPUs. */
__attribute__((noreturn))
static void
simultaneous_panic_worker
(void * arg, wait_result_t wres __unused)
{
	atomic_int *start_panic = (atomic_int *)arg;

	/* Spin (no blocking) until the coordinator flips the flag. */
	while (!atomic_load(start_panic)) { ; }
	panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number());
	__builtin_unreachable();
}
3500
3501 static int
3502 sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS
3503 {
3504 #pragma unused(oidp, arg1, arg2)
3505 if (req->newlen == 0)
3506 return EINVAL;
3507
3508 int i = 0, threads_to_create = 2 * processor_count;
3509 atomic_int start_panic = 0;
3510 unsigned int threads_created = 0;
3511 thread_t new_panic_thread;
3512
3513 for (i = threads_to_create; i > 0; i--) {
3514 if (kernel_thread_start(simultaneous_panic_worker, (void *) &start_panic, &new_panic_thread) == KERN_SUCCESS) {
3515 threads_created++;
3516 }
3517 }
3518
3519 /* FAIL if we couldn't create at least processor_count threads */
3520 if (threads_created < processor_count) {
3521 panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %d (of %d)",
3522 threads_created, threads_to_create);
3523 }
3524
3525 atomic_exchange(&start_panic, 1);
3526 while (1) { ; }
3527 }
3528
/* Debug-only fault-injection sysctls; writing a keyword triggers the named test. */
SYSCTL_PROC(_debug, OID_AUTO, panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_panic_test, "A", "panic test");
SYSCTL_PROC(_debug, OID_AUTO, debugger_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_debugger_test, "A", "debugger test");
SYSCTL_PROC(_debug, OID_AUTO, spinlock_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_spinlock_panic_test, "A", "spinlock panic test");
SYSCTL_PROC(_debug, OID_AUTO, simultaneous_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_simultaneous_panic_test, "A", "simultaneous panic test");


#endif /* DEVELOPMENT || DEBUG */

/* Thread groups are not supported in this kernel configuration. */
const uint32_t thread_groups_supported = 0;
3538
3539 STATIC int
3540 sysctl_thread_groups_supported (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
3541 {
3542 int value = thread_groups_supported;
3543 return sysctl_io_number(req, value, sizeof(value), NULL, NULL);
3544 }
3545
/* kern.thread_groups_supported: read-only, backed by the handler above. */
SYSCTL_PROC(_kern, OID_AUTO, thread_groups_supported, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_KERN,
	0, 0, &sysctl_thread_groups_supported, "I", "thread groups supported");
3548
3549 static int
3550 sysctl_grade_cputype SYSCTL_HANDLER_ARGS
3551 {
3552 #pragma unused(arg1, arg2, oidp)
3553 int error = 0;
3554 int type_tuple[2] = {};
3555 int return_value = 0;
3556
3557 error = SYSCTL_IN(req, &type_tuple, sizeof(type_tuple));
3558
3559 if (error) {
3560 return error;
3561 }
3562
3563 return_value = grade_binary(type_tuple[0], type_tuple[1]);
3564
3565 error = SYSCTL_OUT(req, &return_value, sizeof(return_value));
3566
3567 if (error) {
3568 return error;
3569 }
3570
3571 return error;
3572 }
3573
/* kern.grade_cputype: opaque in/out; any user may query which binaries this CPU prefers. */
SYSCTL_PROC(_kern, OID_AUTO, grade_cputype,
	CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED|CTLTYPE_OPAQUE,
	0, 0, &sysctl_grade_cputype, "S",
	"grade value of cpu_type_t+cpu_sub_type_t");