1 /*
2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
66 /*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
72
73 /*
74 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
84 */
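/*
 * For reference, a minimal sketch of the preferred new-style pattern, modeled
 * on the handlers and declarations already in this file.  The names
 * "example_flag", "sysctl_example" and "example_proc" are hypothetical and
 * exist only for illustration.
 *
 *	static int example_flag = 0;
 *	SYSCTL_INT(_kern, OID_AUTO, example_flag,
 *	    CTLFLAG_RW | CTLFLAG_LOCKED,
 *	    &example_flag, 0, "illustrative tunable");
 *
 *	STATIC int
 *	sysctl_example(__unused struct sysctl_oid *oidp, __unused void *arg1,
 *	    __unused int arg2, struct sysctl_req *req)
 *	{
 *		int new_value, changed;
 *		int error = sysctl_io_number(req, example_flag, sizeof(int),
 *		    &new_value, &changed);
 *		if (changed)
 *			example_flag = new_value;
 *		return error;
 *	}
 *	SYSCTL_PROC(_kern, OID_AUTO, example_proc,
 *	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
 *	    0, 0, sysctl_example, "I", "illustrative handler");
 */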
85
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
95 #include <sys/buf.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
98 #include <sys/tty.h>
99 #include <sys/disklabel.h>
100 #include <sys/vm.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105 #include <sys/memory_maintenance.h>
106 #include <sys/priv.h>
107
108 #include <security/audit/audit.h>
109 #include <kern/kalloc.h>
110
111 #include <mach/machine.h>
112 #include <mach/mach_host.h>
113 #include <mach/mach_types.h>
114 #include <mach/vm_param.h>
115 #include <kern/mach_param.h>
116 #include <kern/task.h>
117 #include <kern/thread.h>
118 #include <kern/processor.h>
119 #include <kern/debug.h>
120 #include <kern/sched_prim.h>
121 #include <vm/vm_kern.h>
122 #include <vm/vm_map.h>
123 #include <mach/host_info.h>
124
125 #include <sys/mount_internal.h>
126 #include <sys/kdebug.h>
127
128 #include <IOKit/IOPlatformExpert.h>
129 #include <pexpert/pexpert.h>
130
131 #include <machine/machine_routines.h>
132 #include <machine/exec.h>
133
134 #include <vm/vm_protos.h>
135 #include <vm/vm_pageout.h>
136 #include <vm/vm_compressor_algorithms.h>
137 #include <sys/imgsrc.h>
138 #include <kern/timer_call.h>
139
140 #if defined(__i386__) || defined(__x86_64__)
141 #include <i386/cpuid.h>
142 #endif
143
144 #if CONFIG_FREEZE
145 #include <sys/kern_memorystatus.h>
146 #endif
147
148 #if KPERF
149 #include <kperf/kperf.h>
150 #endif
151
152 #if HYPERVISOR
153 #include <kern/hv_support.h>
154 #endif
155
156 /*
157 * deliberately setting max requests to a really high number
158 * so that runaway settings do not cause MALLOC overflows
159 */
160 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
161
162 extern int aio_max_requests;
163 extern int aio_max_requests_per_process;
164 extern int aio_worker_threads;
165 extern int lowpri_IO_window_msecs;
166 extern int lowpri_IO_delay_msecs;
167 extern int nx_enabled;
168 extern int speculative_reads_disabled;
169 extern int ignore_is_ssd;
170 extern unsigned int speculative_prefetch_max;
171 extern unsigned int speculative_prefetch_max_iosize;
172 extern unsigned int preheat_max_bytes;
173 extern unsigned int preheat_min_bytes;
174 extern long numvnodes;
175
176 extern uuid_string_t bootsessionuuid_string;
177
178 extern unsigned int vm_max_delayed_work_limit;
179 extern unsigned int vm_max_batch;
180
181 extern unsigned int vm_page_free_min;
182 extern unsigned int vm_page_free_target;
183 extern unsigned int vm_page_free_reserved;
184 extern unsigned int vm_page_speculative_percentage;
185 extern unsigned int vm_page_speculative_q_age_ms;
186
187 #if (DEVELOPMENT || DEBUG)
188 extern uint32_t vm_page_creation_throttled_hard;
189 extern uint32_t vm_page_creation_throttled_soft;
190 #endif /* DEVELOPMENT || DEBUG */
191
192 /*
193 * Conditionally allow dtrace to see these functions for debugging purposes.
194 */
195 #ifdef STATIC
196 #undef STATIC
197 #endif
198 #if 0
199 #define STATIC
200 #else
201 #define STATIC static
202 #endif
203
204 extern boolean_t mach_timer_coalescing_enabled;
205
206 extern uint64_t timer_deadline_tracking_bin_1, timer_deadline_tracking_bin_2;
207
208 STATIC void
209 fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
210 STATIC void
211 fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
212 STATIC void
213 fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
214 STATIC void
215 fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
216 STATIC void
217 fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
218 STATIC void
219 fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);
220
221 extern int
222 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
223 #if NFSCLIENT
224 extern int
225 netboot_root(void);
226 #endif
227 int
228 pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
229 proc_t p);
230 __private_extern__ kern_return_t
231 reset_vmobjectcache(unsigned int val1, unsigned int val2);
232 int
233 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
234 size_t *sizep, proc_t cur_proc);
235 STATIC int
236 sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
237 proc_t cur_proc, int argc_yes);
238 int
239 sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
240 size_t newlen, void *sp, int len);
241
242 STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
243 STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
244 STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
245 STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
246 STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
247 int sysdoproc_callback(proc_t p, void *arg);
248
249
250 /* forward declarations for non-static STATIC */
251 STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
252 STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
253 STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
254 STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
255 STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
256 STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
257 #if COUNT_SYSCALLS
258 STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
259 #endif /* COUNT_SYSCALLS */
260 STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
261 STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
262 STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
263 STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
264 STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
265 STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
266 STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
267 STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
268 STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
269 STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
270 STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
271 STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
272 STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
273 STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
274 STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
275 STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
276 #if NFSCLIENT
277 STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
278 #endif
279 #ifdef CONFIG_IMGSRC_ACCESS
280 STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
281 #endif
282 STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
283 STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
284 #if CONFIG_COREDUMP
285 STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
286 STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
287 #endif
288 STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
289 STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
290 STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
291 STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
292 STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
293 STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
294 STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
295 STATIC int fetch_process_cputype( proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
296 STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
297 STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
298 STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
299 STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
300 STATIC int sysctl_minimalboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
301 STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
302
303
304 extern void IORegistrySetOSBuildVersion(char * build_version);
305
306 STATIC void
307 fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
308 {
309 la64->ldavg[0] = la->ldavg[0];
310 la64->ldavg[1] = la->ldavg[1];
311 la64->ldavg[2] = la->ldavg[2];
312 la64->fscale = (user64_long_t)la->fscale;
313 }
314
315 STATIC void
316 fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
317 {
318 la32->ldavg[0] = la->ldavg[0];
319 la32->ldavg[1] = la->ldavg[1];
320 la32->ldavg[2] = la->ldavg[2];
321 la32->fscale = (user32_long_t)la->fscale;
322 }
323
324 #if CONFIG_COREDUMP
325 /*
326 * Attributes stored in the kernel.
327 */
328 extern char corefilename[MAXPATHLEN+1];
329 extern int do_coredump;
330 extern int sugid_coredump;
331 #endif
332
333 #if COUNT_SYSCALLS
334 extern int do_count_syscalls;
335 #endif
336
337 #ifdef INSECURE
338 int securelevel = -1;
339 #else
340 int securelevel;
341 #endif
342
343 STATIC int
344 sysctl_handle_kern_threadname( __unused struct sysctl_oid *oidp, __unused void *arg1,
345 __unused int arg2, struct sysctl_req *req)
346 {
347 int error;
348 struct uthread *ut = get_bsdthread_info(current_thread());
349 user_addr_t oldp=0, newp=0;
350 size_t *oldlenp=NULL;
351 size_t newlen=0;
352
353 oldp = req->oldptr;
354 oldlenp = &(req->oldlen);
355 newp = req->newptr;
356 newlen = req->newlen;
357
358 /* We want the current length, and maybe the string itself */
359 if(oldlenp) {
360 /* if we have no thread name yet, tell them we want MAXTHREADNAMESIZE - 1 */
361 size_t currlen = MAXTHREADNAMESIZE - 1;
362
363 if(ut->pth_name)
364 /* use length of current thread name */
365 currlen = strlen(ut->pth_name);
366 if(oldp) {
367 if(*oldlenp < currlen)
368 return ENOMEM;
369 /* NOTE - we do not copy the NULL terminator */
370 if(ut->pth_name) {
371 error = copyout(ut->pth_name,oldp,currlen);
372 if(error)
373 return error;
374 }
375 }
376 /* return length of thread name minus NULL terminator (just like strlen) */
377 req->oldidx = currlen;
378 }
379
380 /* We want to set the name to something */
381 if(newp)
382 {
383 if(newlen > (MAXTHREADNAMESIZE - 1))
384 return ENAMETOOLONG;
385 if(!ut->pth_name)
386 {
387 ut->pth_name = (char*)kalloc( MAXTHREADNAMESIZE );
388 if(!ut->pth_name)
389 return ENOMEM;
390 } else {
391 kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, ut->pth_name);
392 }
393 bzero(ut->pth_name, MAXTHREADNAMESIZE);
394 error = copyin(newp, ut->pth_name, newlen);
395 if (error) {
396 return error;
397 }
398
399 kernel_debug_string_simple(TRACE_STRING_THREADNAME, ut->pth_name);
400 }
401
402 return 0;
403 }
404
405 SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname,"A","");
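/*
 * Illustrative userspace sketch (not kernel code): the OID declared above can
 * be exercised with sysctlbyname(3).  The read path does not copy out a NUL
 * terminator, so the caller terminates the buffer itself; the buffer size and
 * the name "worker-1" below are assumptions made for the example.
 *
 *	#include <sys/sysctl.h>
 *	#include <string.h>
 *
 *	char name[64];
 *	size_t len = sizeof(name) - 1;
 *	if (sysctlbyname("kern.threadname", name, &len, NULL, 0) == 0)
 *		name[len] = '\0';		// returned length excludes the NUL
 *
 *	char newname[] = "worker-1";		// hypothetical thread name
 *	sysctlbyname("kern.threadname", NULL, NULL, newname, strlen(newname));
 */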
406
407 #define BSD_HOST 1
408 STATIC int
409 sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
410 {
411 host_basic_info_data_t hinfo;
412 kern_return_t kret;
413 uint32_t size;
414 int changed;
415 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
416 struct _processor_statistics_np *buf;
417 int error;
418
419 kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
420 if (kret != KERN_SUCCESS) {
421 return EINVAL;
422 }
423
424 size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */
425
426 if (req->oldlen < size) {
427 return EINVAL;
428 }
429
430 MALLOC(buf, struct _processor_statistics_np*, size, M_TEMP, M_ZERO | M_WAITOK);
431
432 kret = get_sched_statistics(buf, &size);
433 if (kret != KERN_SUCCESS) {
434 error = EINVAL;
435 goto out;
436 }
437
438 error = sysctl_io_opaque(req, buf, size, &changed);
439 if (error) {
440 goto out;
441 }
442
443 if (changed) {
444 panic("Sched info changed?!");
445 }
446 out:
447 FREE(buf, M_TEMP);
448 return error;
449 }
450
451 SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");
452
453 STATIC int
454 sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
455 {
456 boolean_t active;
457 int res;
458
459 if (req->newlen != sizeof(active)) {
460 return EINVAL;
461 }
462
463 res = copyin(req->newptr, &active, sizeof(active));
464 if (res != 0) {
465 return res;
466 }
467
468 return set_sched_stats_active(active);
469 }
470
471 SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
472
473 extern uint32_t sched_debug_flags;
474 SYSCTL_INT(_debug, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_debug_flags, 0, "scheduler debug");
475
476 #if (DEBUG || DEVELOPMENT)
477 extern boolean_t doprnt_hide_pointers;
478 SYSCTL_INT(_debug, OID_AUTO, hide_kernel_pointers, CTLFLAG_RW | CTLFLAG_LOCKED, &doprnt_hide_pointers, 0, "hide kernel pointers from log");
479 #endif
480
481 extern int get_kernel_symfile(proc_t, char **);
482
483 #if COUNT_SYSCALLS
484 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
485
486 extern unsigned int nsysent;
487 extern int syscalls_log[];
488 extern const char *syscallnames[];
489
490 STATIC int
491 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
492 {
493 __unused int cmd = oidp->oid_arg2; /* subcommand*/
494 __unused int *name = arg1; /* oid element argument vector */
495 __unused int namelen = arg2; /* number of oid element arguments */
496 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
497 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
498 user_addr_t newp = req->newptr; /* user buffer copy in address */
499 size_t newlen = req->newlen; /* user buffer copy in size */
500 int error;
501
502 int tmp;
503
504 /* valid values passed in:
505 * = 0 means don't keep called counts for each bsd syscall
506 * > 0 means keep called counts for each bsd syscall
507 * = 2 means dump current counts to the system log
508 * = 3 means reset all counts
509 * for example, to dump current counts:
510 * sysctl -w kern.count_syscalls=2
511 */
512 error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
513 if ( error != 0 ) {
514 return (error);
515 }
516
517 if ( tmp == 1 ) {
518 do_count_syscalls = 1;
519 }
520 else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
521 int i;
522 for ( i = 0; i < nsysent; i++ ) {
523 if ( syscalls_log[i] != 0 ) {
524 if ( tmp == 2 ) {
525 printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
526 }
527 else {
528 syscalls_log[i] = 0;
529 }
530 }
531 }
532 if ( tmp != 0 ) {
533 do_count_syscalls = 1;
534 }
535 }
536
537 /* adjust index so we return the right required/consumed amount */
538 if (!error)
539 req->oldidx += req->oldlen;
540
541 return (error);
542 }
543 SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
544 0, /* Pointer argument (arg1) */
545 0, /* Integer argument (arg2) */
546 sysctl_docountsyscalls, /* Handler function */
547 NULL, /* Data pointer */
548 "");
549 #endif /* COUNT_SYSCALLS */
550
551 /*
552 * The following sysctl_* functions should not be used
553 * any more, as they can only cope with callers in
554 * user mode: Use new-style
555 * sysctl_io_number()
556 * sysctl_io_string()
557 * sysctl_io_opaque()
558 * instead.
559 */
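/*
 * A minimal sketch of the new-style equivalent, following the handlers later
 * in this file (the buffer name "example_buf" is hypothetical):
 *
 *	STATIC int
 *	sysctl_example_string(__unused struct sysctl_oid *oidp, __unused void *arg1,
 *	    __unused int arg2, struct sysctl_req *req)
 *	{
 *		static char example_buf[256];
 *		int changed;
 *		int error = sysctl_io_string(req, example_buf,
 *		    sizeof(example_buf), 0, &changed);
 *		if (changed) {
 *			// react to the new value here
 *		}
 *		return error;
 *	}
 */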
560
561 /*
562 * Validate parameters and get old / set new parameters
563 * for an integer-valued sysctl function.
564 */
565 int
566 sysctl_int(user_addr_t oldp, size_t *oldlenp,
567 user_addr_t newp, size_t newlen, int *valp)
568 {
569 int error = 0;
570
571 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
572 return (EFAULT);
573 if (oldp && *oldlenp < sizeof(int))
574 return (ENOMEM);
575 if (newp && newlen != sizeof(int))
576 return (EINVAL);
577 *oldlenp = sizeof(int);
578 if (oldp)
579 error = copyout(valp, oldp, sizeof(int));
580 if (error == 0 && newp) {
581 error = copyin(newp, valp, sizeof(int));
582 AUDIT_ARG(value32, *valp);
583 }
584 return (error);
585 }
586
587 /*
588 * Validate parameters and get old / set new parameters
589 * for a quad (64-bit)-valued sysctl function.
590 */
591 int
592 sysctl_quad(user_addr_t oldp, size_t *oldlenp,
593 user_addr_t newp, size_t newlen, quad_t *valp)
594 {
595 int error = 0;
596
597 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
598 return (EFAULT);
599 if (oldp && *oldlenp < sizeof(quad_t))
600 return (ENOMEM);
601 if (newp && newlen != sizeof(quad_t))
602 return (EINVAL);
603 *oldlenp = sizeof(quad_t);
604 if (oldp)
605 error = copyout(valp, oldp, sizeof(quad_t));
606 if (error == 0 && newp)
607 error = copyin(newp, valp, sizeof(quad_t));
608 return (error);
609 }
610
611 STATIC int
612 sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
613 {
614 if (p->p_pid != (pid_t)*(int*)arg)
615 return(0);
616 else
617 return(1);
618 }
619
620 STATIC int
621 sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
622 {
623 if (p->p_pgrpid != (pid_t)*(int*)arg)
624 return(0);
625 else
626 return(1);
627 }
628
629 STATIC int
630 sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
631 {
632 int retval;
633 struct tty *tp;
634
635 /* This is very racy but list lock is held.. Hmmm. */
636 if ((p->p_flag & P_CONTROLT) == 0 ||
637 (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
638 (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
639 tp->t_dev != (dev_t)*(int*)arg)
640 retval = 0;
641 else
642 retval = 1;
643
644 return(retval);
645 }
646
647 STATIC int
648 sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
649 {
650 kauth_cred_t my_cred;
651 uid_t uid;
652
653 if (p->p_ucred == NULL)
654 return(0);
655 my_cred = kauth_cred_proc_ref(p);
656 uid = kauth_cred_getuid(my_cred);
657 kauth_cred_unref(&my_cred);
658
659 if (uid != (uid_t)*(int*)arg)
660 return(0);
661 else
662 return(1);
663 }
664
665
666 STATIC int
667 sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
668 {
669 kauth_cred_t my_cred;
670 uid_t ruid;
671
672 if (p->p_ucred == NULL)
673 return(0);
674 my_cred = kauth_cred_proc_ref(p);
675 ruid = kauth_cred_getruid(my_cred);
676 kauth_cred_unref(&my_cred);
677
678 if (ruid != (uid_t)*(int*)arg)
679 return(0);
680 else
681 return(1);
682 }
683
684 /*
685 * try overestimating by 5 procs
686 */
687 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
688 struct sysdoproc_args {
689 int buflen;
690 void *kprocp;
691 boolean_t is_64_bit;
692 user_addr_t dp;
693 size_t needed;
694 int sizeof_kproc;
695 int *errorp;
696 int uidcheck;
697 int ruidcheck;
698 int ttycheck;
699 int uidval;
700 };
701
702 int
703 sysdoproc_callback(proc_t p, void *arg)
704 {
705 struct sysdoproc_args *args = arg;
706
707 if (args->buflen >= args->sizeof_kproc) {
708 if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0))
709 return (PROC_RETURNED);
710 if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0))
711 return (PROC_RETURNED);
712 if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0))
713 return (PROC_RETURNED);
714
715 bzero(args->kprocp, args->sizeof_kproc);
716 if (args->is_64_bit)
717 fill_user64_proc(p, args->kprocp);
718 else
719 fill_user32_proc(p, args->kprocp);
720 int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
721 if (error) {
722 *args->errorp = error;
723 return (PROC_RETURNED_DONE);
724 }
725 args->dp += args->sizeof_kproc;
726 args->buflen -= args->sizeof_kproc;
727 }
728 args->needed += args->sizeof_kproc;
729 return (PROC_RETURNED);
730 }
731
732 SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
733 STATIC int
734 sysctl_prochandle SYSCTL_HANDLER_ARGS
735 {
736 int cmd = oidp->oid_arg2; /* subcommand for multiple nodes */
737 int *name = arg1; /* oid element argument vector */
738 int namelen = arg2; /* number of oid element arguments */
739 user_addr_t where = req->oldptr;/* user buffer copy out address */
740
741 user_addr_t dp = where;
742 size_t needed = 0;
743 int buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
744 int error = 0;
745 boolean_t is_64_bit = proc_is64bit(current_proc());
746 struct user32_kinfo_proc user32_kproc;
747 struct user64_kinfo_proc user_kproc;
748 int sizeof_kproc;
749 void *kprocp;
750 int (*filterfn)(proc_t, void *) = 0;
751 struct sysdoproc_args args;
752 int uidcheck = 0;
753 int ruidcheck = 0;
754 int ttycheck = 0;
755
756 if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL))
757 return (EINVAL);
758
759 if (is_64_bit) {
760 sizeof_kproc = sizeof(user_kproc);
761 kprocp = &user_kproc;
762 } else {
763 sizeof_kproc = sizeof(user32_kproc);
764 kprocp = &user32_kproc;
765 }
766
767 switch (cmd) {
768
769 case KERN_PROC_PID:
770 filterfn = sysdoproc_filt_KERN_PROC_PID;
771 break;
772
773 case KERN_PROC_PGRP:
774 filterfn = sysdoproc_filt_KERN_PROC_PGRP;
775 break;
776
777 case KERN_PROC_TTY:
778 ttycheck = 1;
779 break;
780
781 case KERN_PROC_UID:
782 uidcheck = 1;
783 break;
784
785 case KERN_PROC_RUID:
786 ruidcheck = 1;
787 break;
788
789 case KERN_PROC_ALL:
790 break;
791
792 default:
793 /* must be kern.proc.<unknown> */
794 return (ENOTSUP);
795 }
796
797 error = 0;
798 args.buflen = buflen;
799 args.kprocp = kprocp;
800 args.is_64_bit = is_64_bit;
801 args.dp = dp;
802 args.needed = needed;
803 args.errorp = &error;
804 args.uidcheck = uidcheck;
805 args.ruidcheck = ruidcheck;
806 args.ttycheck = ttycheck;
807 args.sizeof_kproc = sizeof_kproc;
808 if (namelen)
809 args.uidval = name[0];
810
811 proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
812 sysdoproc_callback, &args, filterfn, name);
813
814 if (error)
815 return (error);
816
817 dp = args.dp;
818 needed = args.needed;
819
820 if (where != USER_ADDR_NULL) {
821 req->oldlen = dp - where;
822 if (needed > req->oldlen)
823 return (ENOMEM);
824 } else {
825 needed += KERN_PROCSLOP;
826 req->oldlen = needed;
827 }
828 /* adjust index so we return the right required/consumed amount */
829 req->oldidx += req->oldlen;
830 return (0);
831 }
832
833 /*
834 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
835 * in the sysctl declaration itself, which comes into the handler function
836 * as 'oidp->oid_arg2'.
837 *
838 * For these particular sysctls, since they have well known OIDs, we could
839 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
840 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
841 * of a well known value with a common handler function. This is desirable,
842 * because we want well known values to "go away" at some future date.
843 *
844 * It should be noted that the value of '((int *)arg1)[1]' is used as an
845 * integer parameter to the subcommand for many of these sysctls; we'd
846 * rather have used '((int *)arg1)[0]' for that, or even better, an element
847 * in a structure passed in as the 'newp' argument to sysctlbyname(3),
848 * and then use leaf-node permissions enforcement, but that would have
849 * necessitated modifying user space code to correspond to the interface
850 * change, and we are striving for binary backward compatibility here; even
851 * though these are SPI, and not intended for use by user space applications
852 * which are not themselves system tools or libraries, some applications
853 * have erroneously used them.
854 */
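/*
 * Illustrative userspace sketch (not kernel code): the nodes below are
 * normally consumed through sysctl(3) with a four-element MIB, for example
 * to fetch the kinfo_proc record of the calling process:
 *
 *	#include <sys/sysctl.h>
 *	#include <unistd.h>
 *
 *	struct kinfo_proc kp;
 *	size_t len = sizeof(kp);
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
 *	if (sysctl(mib, 4, &kp, &len, NULL, 0) == 0) {
 *		// kp.kp_proc and kp.kp_eproc now hold the filled-in records
 *	}
 */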
855 SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
856 0, /* Pointer argument (arg1) */
857 KERN_PROC_ALL, /* Integer argument (arg2) */
858 sysctl_prochandle, /* Handler function */
859 NULL, /* Data is size variant on ILP32/LP64 */
860 "");
861 SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
862 0, /* Pointer argument (arg1) */
863 KERN_PROC_PID, /* Integer argument (arg2) */
864 sysctl_prochandle, /* Handler function */
865 NULL, /* Data is size variant on ILP32/LP64 */
866 "");
867 SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
868 0, /* Pointer argument (arg1) */
869 KERN_PROC_TTY, /* Integer argument (arg2) */
870 sysctl_prochandle, /* Handler function */
871 NULL, /* Data is size variant on ILP32/LP64 */
872 "");
873 SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
874 0, /* Pointer argument (arg1) */
875 KERN_PROC_PGRP, /* Integer argument (arg2) */
876 sysctl_prochandle, /* Handler function */
877 NULL, /* Data is size variant on ILP32/LP64 */
878 "");
879 SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
880 0, /* Pointer argument (arg1) */
881 KERN_PROC_UID, /* Integer argument (arg2) */
882 sysctl_prochandle, /* Handler function */
883 NULL, /* Data is size variant on ILP32/LP64 */
884 "");
885 SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
886 0, /* Pointer argument (arg1) */
887 KERN_PROC_RUID, /* Integer argument (arg2) */
888 sysctl_prochandle, /* Handler function */
889 NULL, /* Data is size variant on ILP32/LP64 */
890 "");
891 SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
892 0, /* Pointer argument (arg1) */
893 KERN_PROC_LCID, /* Integer argument (arg2) */
894 sysctl_prochandle, /* Handler function */
895 NULL, /* Data is size variant on ILP32/LP64 */
896 "");
897
898
899 /*
900 * Fill in non-zero fields of an eproc structure for the specified process.
901 */
902 STATIC void
903 fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
904 {
905 struct tty *tp;
906 struct pgrp *pg;
907 struct session *sessp;
908 kauth_cred_t my_cred;
909
910 pg = proc_pgrp(p);
911 sessp = proc_session(p);
912
913 if (pg != PGRP_NULL) {
914 ep->e_pgid = p->p_pgrpid;
915 ep->e_jobc = pg->pg_jobc;
916 if (sessp != SESSION_NULL && sessp->s_ttyvp)
917 ep->e_flag = EPROC_CTTY;
918 }
919 ep->e_ppid = p->p_ppid;
920 if (p->p_ucred) {
921 my_cred = kauth_cred_proc_ref(p);
922
923 /* A fake historical pcred */
924 ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
925 ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
926 ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
927 ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);
928
929 /* A fake historical *kauth_cred_t */
930 ep->e_ucred.cr_ref = my_cred->cr_ref;
931 ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
932 ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
933 bcopy(posix_cred_get(my_cred)->cr_groups,
934 ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));
935
936 kauth_cred_unref(&my_cred);
937 }
938
939 if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
940 (tp = SESSION_TP(sessp))) {
941 ep->e_tdev = tp->t_dev;
942 ep->e_tpgid = sessp->s_ttypgrpid;
943 } else
944 ep->e_tdev = NODEV;
945
946 if (sessp != SESSION_NULL) {
947 if (SESS_LEADER(p, sessp))
948 ep->e_flag |= EPROC_SLEADER;
949 session_rele(sessp);
950 }
951 if (pg != PGRP_NULL)
952 pg_rele(pg);
953 }
954
955 /*
956 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
957 */
958 STATIC void
959 fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
960 {
961 struct tty *tp;
962 struct pgrp *pg;
963 struct session *sessp;
964 kauth_cred_t my_cred;
965
966 pg = proc_pgrp(p);
967 sessp = proc_session(p);
968
969 if (pg != PGRP_NULL) {
970 ep->e_pgid = p->p_pgrpid;
971 ep->e_jobc = pg->pg_jobc;
972 if (sessp != SESSION_NULL && sessp->s_ttyvp)
973 ep->e_flag = EPROC_CTTY;
974 }
975 ep->e_ppid = p->p_ppid;
976 if (p->p_ucred) {
977 my_cred = kauth_cred_proc_ref(p);
978
979 /* A fake historical pcred */
980 ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
981 ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
982 ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
983 ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);
984
985 /* A fake historical *kauth_cred_t */
986 ep->e_ucred.cr_ref = my_cred->cr_ref;
987 ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
988 ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
989 bcopy(posix_cred_get(my_cred)->cr_groups,
990 ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));
991
992 kauth_cred_unref(&my_cred);
993 }
994
995 if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
996 (tp = SESSION_TP(sessp))) {
997 ep->e_tdev = tp->t_dev;
998 ep->e_tpgid = sessp->s_ttypgrpid;
999 } else
1000 ep->e_tdev = NODEV;
1001
1002 if (sessp != SESSION_NULL) {
1003 if (SESS_LEADER(p, sessp))
1004 ep->e_flag |= EPROC_SLEADER;
1005 session_rele(sessp);
1006 }
1007 if (pg != PGRP_NULL)
1008 pg_rele(pg);
1009 }
1010
1011 /*
1012 * Fill in an eproc structure for the specified process.
1013 * bzeroed by our caller, so only set non-zero fields.
1014 */
1015 STATIC void
1016 fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
1017 {
1018 exp->p_starttime.tv_sec = p->p_start.tv_sec;
1019 exp->p_starttime.tv_usec = p->p_start.tv_usec;
1020 exp->p_flag = p->p_flag;
1021 if (p->p_lflag & P_LTRACED)
1022 exp->p_flag |= P_TRACED;
1023 if (p->p_lflag & P_LPPWAIT)
1024 exp->p_flag |= P_PPWAIT;
1025 if (p->p_lflag & P_LEXIT)
1026 exp->p_flag |= P_WEXIT;
1027 exp->p_stat = p->p_stat;
1028 exp->p_pid = p->p_pid;
1029 exp->p_oppid = p->p_oppid;
1030 /* Mach related */
1031 exp->user_stack = p->user_stack;
1032 exp->p_debugger = p->p_debugger;
1033 exp->sigwait = p->sigwait;
1034 /* scheduling */
1035 #ifdef _PROC_HAS_SCHEDINFO_
1036 exp->p_estcpu = p->p_estcpu;
1037 exp->p_pctcpu = p->p_pctcpu;
1038 exp->p_slptime = p->p_slptime;
1039 #endif
1040 exp->p_realtimer.it_interval.tv_sec =
1041 (user32_time_t)p->p_realtimer.it_interval.tv_sec;
1042 exp->p_realtimer.it_interval.tv_usec =
1043 (__int32_t)p->p_realtimer.it_interval.tv_usec;
1044
1045 exp->p_realtimer.it_value.tv_sec =
1046 (user32_time_t)p->p_realtimer.it_value.tv_sec;
1047 exp->p_realtimer.it_value.tv_usec =
1048 (__int32_t)p->p_realtimer.it_value.tv_usec;
1049
1050 exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
1051 exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;
1052
1053 exp->p_sigignore = p->p_sigignore;
1054 exp->p_sigcatch = p->p_sigcatch;
1055 exp->p_priority = p->p_priority;
1056 exp->p_nice = p->p_nice;
1057 bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
1058 exp->p_xstat = p->p_xstat;
1059 exp->p_acflag = p->p_acflag;
1060 }
1061
1062 /*
1063 * Fill in an LP64 version of extern_proc structure for the specified process.
1064 */
1065 STATIC void
1066 fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
1067 {
1068 exp->p_starttime.tv_sec = p->p_start.tv_sec;
1069 exp->p_starttime.tv_usec = p->p_start.tv_usec;
1070 exp->p_flag = p->p_flag;
1071 if (p->p_lflag & P_LTRACED)
1072 exp->p_flag |= P_TRACED;
1073 if (p->p_lflag & P_LPPWAIT)
1074 exp->p_flag |= P_PPWAIT;
1075 if (p->p_lflag & P_LEXIT)
1076 exp->p_flag |= P_WEXIT;
1077 exp->p_stat = p->p_stat;
1078 exp->p_pid = p->p_pid;
1079 exp->p_oppid = p->p_oppid;
1080 /* Mach related */
1081 exp->user_stack = p->user_stack;
1082 exp->p_debugger = p->p_debugger;
1083 exp->sigwait = p->sigwait;
1084 /* scheduling */
1085 #ifdef _PROC_HAS_SCHEDINFO_
1086 exp->p_estcpu = p->p_estcpu;
1087 exp->p_pctcpu = p->p_pctcpu;
1088 exp->p_slptime = p->p_slptime;
1089 #endif
1090 exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
1091 exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
1092
1093 exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
1094 exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
1095
1096 exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
1097 exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
1098
1099 exp->p_sigignore = p->p_sigignore;
1100 exp->p_sigcatch = p->p_sigcatch;
1101 exp->p_priority = p->p_priority;
1102 exp->p_nice = p->p_nice;
1103 bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
1104 exp->p_xstat = p->p_xstat;
1105 exp->p_acflag = p->p_acflag;
1106 }
1107
1108 STATIC void
1109 fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
1110 {
1111 /* on a 64 bit kernel, 32 bit users get some truncated information */
1112 fill_user32_externproc(p, &kp->kp_proc);
1113 fill_user32_eproc(p, &kp->kp_eproc);
1114 }
1115
1116 STATIC void
1117 fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
1118 {
1119 fill_user64_externproc(p, &kp->kp_proc);
1120 fill_user64_eproc(p, &kp->kp_eproc);
1121 }
1122
1123 STATIC int
1124 sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
1125 {
1126 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1127 int *name = arg1; /* oid element argument vector */
1128 int namelen = arg2; /* number of oid element arguments */
1129 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1130 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1131 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1132 // size_t newlen = req->newlen; /* user buffer copy in size */
1133
1134 int ret=0;
1135
1136 if (namelen == 0)
1137 return(ENOTSUP);
1138
1139 switch(name[0]) {
1140 case KERN_KDEFLAGS:
1141 case KERN_KDDFLAGS:
1142 case KERN_KDENABLE:
1143 case KERN_KDGETBUF:
1144 case KERN_KDSETUP:
1145 case KERN_KDREMOVE:
1146 case KERN_KDSETREG:
1147 case KERN_KDGETREG:
1148 case KERN_KDREADTR:
1149 case KERN_KDWRITETR:
1150 case KERN_KDWRITEMAP:
1151 case KERN_KDTEST:
1152 case KERN_KDPIDTR:
1153 case KERN_KDTHRMAP:
1154 case KERN_KDPIDEX:
1155 case KERN_KDSETBUF:
1156 case KERN_KDGETENTROPY:
1157 case KERN_KDREADCURTHRMAP:
1158 case KERN_KDSET_TYPEFILTER:
1159 case KERN_KDBUFWAIT:
1160 case KERN_KDCPUMAP:
1161 case KERN_KDWRITEMAP_V3:
1162 case KERN_KDWRITETR_V3:
1163 ret = kdbg_control(name, namelen, oldp, oldlenp);
1164 break;
1165 default:
1166 ret= ENOTSUP;
1167 break;
1168 }
1169
1170 /* adjust index so we return the right required/consumed amount */
1171 if (!ret)
1172 req->oldidx += req->oldlen;
1173
1174 return (ret);
1175 }
1176 SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
1177 0, /* Pointer argument (arg1) */
1178 0, /* Integer argument (arg2) */
1179 sysctl_kdebug_ops, /* Handler function */
1180 NULL, /* Data pointer */
1181 "");
1182
1183
1184 /*
1185 * Return the top *sizep bytes of the user stack, or the entire area of the
1186 * user stack down through the saved exec_path, whichever is smaller.
1187 */
1188 STATIC int
1189 sysctl_doprocargs SYSCTL_HANDLER_ARGS
1190 {
1191 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1192 int *name = arg1; /* oid element argument vector */
1193 int namelen = arg2; /* number of oid element arguments */
1194 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1195 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1196 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1197 // size_t newlen = req->newlen; /* user buffer copy in size */
1198 int error;
1199
1200 error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 0);
1201
1202 /* adjust index so we return the right required/consumed amount */
1203 if (!error)
1204 req->oldidx += req->oldlen;
1205
1206 return (error);
1207 }
1208 SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
1209 0, /* Pointer argument (arg1) */
1210 0, /* Integer argument (arg2) */
1211 sysctl_doprocargs, /* Handler function */
1212 NULL, /* Data pointer */
1213 "");
1214
1215 STATIC int
1216 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1217 {
1218 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1219 int *name = arg1; /* oid element argument vector */
1220 int namelen = arg2; /* number of oid element arguments */
1221 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1222 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1223 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1224 // size_t newlen = req->newlen; /* user buffer copy in size */
1225 int error;
1226
1227 error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 1);
1228
1229 /* adjust index so we return the right required/consumed amount */
1230 if (!error)
1231 req->oldidx += req->oldlen;
1232
1233 return (error);
1234 }
1235 SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
1236 0, /* Pointer argument (arg1) */
1237 0, /* Integer argument (arg2) */
1238 sysctl_doprocargs2, /* Handler function */
1239 NULL, /* Data pointer */
1240 "");
1241
1242 STATIC int
1243 sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
1244 size_t *sizep, proc_t cur_proc, int argc_yes)
1245 {
1246 proc_t p;
1247 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1248 int error = 0;
1249 struct _vm_map *proc_map;
1250 struct task * task;
1251 vm_map_copy_t tmp;
1252 user_addr_t arg_addr;
1253 size_t arg_size;
1254 caddr_t data;
1255 size_t argslen=0;
1256 int size;
1257 vm_offset_t copy_start, copy_end;
1258 kern_return_t ret;
1259 int pid;
1260 kauth_cred_t my_cred;
1261 uid_t uid;
1262
1263 if ( namelen < 1 )
1264 return(EINVAL);
1265
1266 if (argc_yes)
1267 buflen -= sizeof(int); /* reserve first word to return argc */
1268
1269 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1270 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl) */
1271 /* is not NULL, then the caller wants us to return the length needed to */
1272 /* hold the data we would return */
1273 if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1274 return(EINVAL);
1275 }
1276 arg_size = buflen;
1277
1278 /*
1279 * Lookup process by pid
1280 */
1281 pid = name[0];
1282 p = proc_find(pid);
1283 if (p == NULL) {
1284 return(EINVAL);
1285 }
1286
1287 /*
1288 * Copy the top N bytes of the stack.
1289 * On all machines we have so far, the stack grows
1290 * downwards.
1291 *
1292 * If the user expects no more than N bytes of
1293 * argument list, use that as a guess for the
1294 * size.
1295 */
1296
1297 if (!p->user_stack) {
1298 proc_rele(p);
1299 return(EINVAL);
1300 }
1301
1302 if (where == USER_ADDR_NULL) {
1303 /* caller only wants to know length of proc args data */
1304 if (sizep == NULL) {
1305 proc_rele(p);
1306 return(EFAULT);
1307 }
1308
1309 size = p->p_argslen;
1310 proc_rele(p);
1311 if (argc_yes) {
1312 size += sizeof(int);
1313 }
1314 else {
1315 /*
1316 * old PROCARGS will return the executable's path plus some
1317 * extra space for word alignment and data tags
1318 */
1319 size += PATH_MAX + (6 * sizeof(int));
1320 }
1321 size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1322 *sizep = size;
1323 return (0);
1324 }
1325
1326 my_cred = kauth_cred_proc_ref(p);
1327 uid = kauth_cred_getuid(my_cred);
1328 kauth_cred_unref(&my_cred);
1329
1330 if ((uid != kauth_cred_getuid(kauth_cred_get()))
1331 && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
1332 proc_rele(p);
1333 return (EINVAL);
1334 }
1335
1336 if ((u_int)arg_size > p->p_argslen)
1337 arg_size = round_page(p->p_argslen);
1338
1339 arg_addr = p->user_stack - arg_size;
1340
1341
1342 /*
1343 * Before we can block (any VM code), make another
1344 * reference to the map to keep it alive. We do
1345 * that by getting a reference on the task itself.
1346 */
1347 task = p->task;
1348 if (task == NULL) {
1349 proc_rele(p);
1350 return(EINVAL);
1351 }
1352
1353 argslen = p->p_argslen;
1354 /*
1355 * Once we have a task reference we can convert that into a
1356 * map reference, which we will use in the calls below. The
1357 * task/process may change its map after we take this reference
1358 * (see execve), but the worst that will happen then is a return
1359 * of stale info (which is always a possibility).
1360 */
1361 task_reference(task);
1362 proc_rele(p);
1363 proc_map = get_task_map_reference(task);
1364 task_deallocate(task);
1365
1366 if (proc_map == NULL)
1367 return(EINVAL);
1368
1369
1370 ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size), VM_KERN_MEMORY_BSD);
1371 if (ret != KERN_SUCCESS) {
1372 vm_map_deallocate(proc_map);
1373 return(ENOMEM);
1374 }
1375
1376 copy_end = round_page(copy_start + arg_size);
1377
1378 if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
1379 (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
1380 vm_map_deallocate(proc_map);
1381 kmem_free(kernel_map, copy_start,
1382 round_page(arg_size));
1383 return (EIO);
1384 }
1385
1386 /*
1387 * Now that we've done the copyin from the process'
1388 * map, we can release the reference to it.
1389 */
1390 vm_map_deallocate(proc_map);
1391
1392 if( vm_map_copy_overwrite(kernel_map,
1393 (vm_map_address_t)copy_start,
1394 tmp, FALSE) != KERN_SUCCESS) {
1395 kmem_free(kernel_map, copy_start,
1396 round_page(arg_size));
1397 vm_map_copy_discard(tmp);
1398 return (EIO);
1399 }
1400
1401 if (arg_size > argslen) {
1402 data = (caddr_t) (copy_end - argslen);
1403 size = argslen;
1404 } else {
1405 data = (caddr_t) (copy_end - arg_size);
1406 size = arg_size;
1407 }
1408
1409 /*
1410 * When these sysctls were introduced, the first string in the strings
1411 * section was just the bare path of the executable. However, for security
1412 * reasons we now prefix this string with executable_path= so it can be
1413 * parsed getenv style. To avoid binary compatibility issues with existing
1414 * callers of this sysctl, we strip it off here if present.
1415 * (rdar://problem/13746466)
1416 */
1417 #define EXECUTABLE_KEY "executable_path="
1418 if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0){
1419 data += strlen(EXECUTABLE_KEY);
1420 size -= strlen(EXECUTABLE_KEY);
1421 }
1422
1423 if (argc_yes) {
1424 /* Put processes argc as the first word in the copyout buffer */
1425 suword(where, p->p_argc);
1426 error = copyout(data, (where + sizeof(int)), size);
1427 size += sizeof(int);
1428 } else {
1429 error = copyout(data, where, size);
1430
1431 /*
1432 * Make the old PROCARGS work to return the executable's path
1433 * But, only if there is enough space in the provided buffer
1434 *
1435 * on entry: data [possibly] points to the beginning of the path
1436 *
1437 * Note: we keep all pointers & sizes aligned to word boundaries
1438 */
1439 if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
1440 {
1441 int binPath_sz, alignedBinPath_sz = 0;
1442 int extraSpaceNeeded, addThis;
1443 user_addr_t placeHere;
1444 char * str = (char *) data;
1445 int max_len = size;
1446
1447 /* Some apps are really bad about messing up their stacks,
1448 so we have to be extra careful about getting the length
1449 of the executing binary. If we encounter an error, we bail.
1450 */
1451
1452 /* Limit ourselves to PATH_MAX paths */
1453 if ( max_len > PATH_MAX ) max_len = PATH_MAX;
1454
1455 binPath_sz = 0;
1456
1457 while ( (binPath_sz < max_len-1) && (*str++ != 0) )
1458 binPath_sz++;
1459
1460 /* If we have a NUL terminator, copy it, too */
1461 if (binPath_sz < max_len-1) binPath_sz += 1;
1462
1463 /* Pre-flight the space requirements */
1464
1465 /* Account for the padding that fills out binPath to the next word */
1466 alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;
1467
1468 placeHere = where + size;
1469
1470 /* Account for the bytes needed to keep placeHere word aligned */
1471 addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;
1472
1473 /* Add up all the space that is needed */
1474 extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
1475
1476 /* is there room to tack on argv[0]? */
1477 if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
1478 {
1479 placeHere += addThis;
1480 suword(placeHere, 0);
1481 placeHere += sizeof(int);
1482 suword(placeHere, 0xBFFF0000);
1483 placeHere += sizeof(int);
1484 suword(placeHere, 0);
1485 placeHere += sizeof(int);
1486 error = copyout(data, placeHere, binPath_sz);
1487 if ( ! error )
1488 {
1489 placeHere += binPath_sz;
1490 suword(placeHere, 0);
1491 size += extraSpaceNeeded;
1492 }
1493 }
1494 }
1495 }
1496
1497 if (copy_start != (vm_offset_t) 0) {
1498 kmem_free(kernel_map, copy_start, copy_end - copy_start);
1499 }
1500 if (error) {
1501 return(error);
1502 }
1503
1504 if (where != USER_ADDR_NULL)
1505 *sizep = size;
1506 return (0);
1507 }
1508
1509
1510 /*
1511 * Max number of concurrent aio requests
1512 */
1513 STATIC int
1514 sysctl_aiomax
1515 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1516 {
1517 int new_value, changed;
1518 int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
1519 if (changed) {
1520 /* make sure the system-wide limit is greater than the per process limit */
1521 if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS)
1522 aio_max_requests = new_value;
1523 else
1524 error = EINVAL;
1525 }
1526 return(error);
1527 }
1528
1529
1530 /*
1531 * Max number of concurrent aio requests per process
1532 */
1533 STATIC int
1534 sysctl_aioprocmax
1535 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1536 {
1537 int new_value, changed;
1538 int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
1539 if (changed) {
1540 /* make sure per process limit is less than the system-wide limit */
1541 if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
1542 aio_max_requests_per_process = new_value;
1543 else
1544 error = EINVAL;
1545 }
1546 return(error);
1547 }
1548
1549
1550 /*
1551 * Max number of async IO worker threads
1552 */
1553 STATIC int
1554 sysctl_aiothreads
1555 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1556 {
1557 int new_value, changed;
1558 int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
1559 if (changed) {
1560 /* we only allow an increase in the number of worker threads */
1561 if (new_value > aio_worker_threads ) {
1562 _aio_create_worker_threads((new_value - aio_worker_threads));
1563 aio_worker_threads = new_value;
1564 }
1565 else
1566 error = EINVAL;
1567 }
1568 return(error);
1569 }
1570
1571
1572 /*
1573 * System-wide limit on the max number of processes
1574 */
1575 STATIC int
1576 sysctl_maxproc
1577 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1578 {
1579 int new_value, changed;
1580 int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
1581 if (changed) {
1582 AUDIT_ARG(value32, new_value);
1583 /* make sure the system-wide limit is less than the configured hard
1584 limit set at kernel compilation */
1585 if (new_value <= hard_maxproc && new_value > 0)
1586 maxproc = new_value;
1587 else
1588 error = EINVAL;
1589 }
1590 return(error);
1591 }
1592
1593 SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
1594 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1595 ostype, 0, "");
1596 SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
1597 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1598 osrelease, 0, "");
1599 SYSCTL_INT(_kern, KERN_OSREV, osrevision,
1600 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1601 (int *)NULL, BSD, "");
1602 SYSCTL_STRING(_kern, KERN_VERSION, version,
1603 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1604 version, 0, "");
1605 SYSCTL_STRING(_kern, OID_AUTO, uuid,
1606 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1607 &kernel_uuid_string[0], 0, "");
1608
1609 #if DEBUG
1610 int debug_kprint_syscall = 0;
1611 char debug_kprint_syscall_process[MAXCOMLEN+1];
1612
1613 /* Thread safe: bits and string value are not used to reclaim state */
1614 SYSCTL_INT (_debug, OID_AUTO, kprint_syscall,
1615 CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
1616 SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
1617 CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
1618 "name of process for kprintf syscall tracing");
1619
1620 int debug_kprint_current_process(const char **namep)
1621 {
1622 struct proc *p = current_proc();
1623
1624 if (p == NULL) {
1625 return 0;
1626 }
1627
1628 if (debug_kprint_syscall_process[0]) {
1629 /* user asked to scope tracing to a particular process name */
1630 if(0 == strncmp(debug_kprint_syscall_process,
1631 p->p_comm, sizeof(debug_kprint_syscall_process))) {
1632 /* no value in telling the user that we traced what they asked */
1633 if(namep) *namep = NULL;
1634
1635 return 1;
1636 } else {
1637 return 0;
1638 }
1639 }
1640
1641 /* trace all processes. Tell user what we traced */
1642 if (namep) {
1643 *namep = p->p_comm;
1644 }
1645
1646 return 1;
1647 }
1648 #endif
1649
1650 /* PR-5293665: need to use a callback function for kern.osversion to set
1651 * osversion in IORegistry */
1652
1653 STATIC int
1654 sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1655 {
1656 int rval = 0;
1657
1658 rval = sysctl_handle_string(oidp, arg1, arg2, req);
1659
1660 if (req->newptr) {
1661 IORegistrySetOSBuildVersion((char *)arg1);
1662 }
1663
1664 return rval;
1665 }
1666
1667 SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
1668 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
1669 osversion, 256 /* OSVERSIZE*/,
1670 sysctl_osversion, "A", "");
1671
1672 STATIC int
1673 sysctl_sysctl_bootargs
1674 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1675 {
1676 int error;
1677 char buf[256];
1678
1679 strlcpy(buf, PE_boot_args(), 256);
1680 error = sysctl_io_string(req, buf, 256, 0, NULL);
1681 return(error);
1682 }
1683
1684 SYSCTL_PROC(_kern, OID_AUTO, bootargs,
1685 CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
1686 NULL, 0,
1687 sysctl_sysctl_bootargs, "A", "bootargs");
1688
1689 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
1690 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1691 &maxfiles, 0, "");
1692 SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
1693 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1694 (int *)NULL, ARG_MAX, "");
1695 SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
1696 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1697 (int *)NULL, _POSIX_VERSION, "");
1698 SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
1699 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1700 (int *)NULL, NGROUPS_MAX, "");
1701 SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
1702 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1703 (int *)NULL, 1, "");
1704 #if 1 /* _POSIX_SAVED_IDS from <unistd.h> */
1705 SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
1706 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1707 (int *)NULL, 1, "");
1708 #else
1709 SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
1710 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
1711 NULL, 0, "");
1712 #endif
1713 SYSCTL_INT(_kern, OID_AUTO, num_files,
1714 CTLFLAG_RD | CTLFLAG_LOCKED,
1715 &nfiles, 0, "");
1716 SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
1717 CTLFLAG_RD | CTLFLAG_LOCKED,
1718 &numvnodes, 0, "");
1719 SYSCTL_INT(_kern, OID_AUTO, num_tasks,
1720 CTLFLAG_RD | CTLFLAG_LOCKED,
1721 &task_max, 0, "");
1722 SYSCTL_INT(_kern, OID_AUTO, num_threads,
1723 CTLFLAG_RD | CTLFLAG_LOCKED,
1724 &thread_max, 0, "");
1725 SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
1726 CTLFLAG_RD | CTLFLAG_LOCKED,
1727 &task_threadmax, 0, "");
1728
1729 STATIC int
1730 sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1731 {
1732 int oldval = desiredvnodes;
1733 int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
1734
1735 if (oldval != desiredvnodes) {
1736 reset_vmobjectcache(oldval, desiredvnodes);
1737 resize_namecache(desiredvnodes);
1738 }
1739
1740 return(error);
1741 }
1742
1743 SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
1744 CTLFLAG_RW | CTLFLAG_LOCKED,
1745 &nc_disabled, 0, "");
1746
1747 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
1748 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1749 0, 0, sysctl_maxvnodes, "I", "");
1750
1751 SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
1752 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1753 0, 0, sysctl_maxproc, "I", "");
1754
1755 SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
1756 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1757 0, 0, sysctl_aiomax, "I", "");
1758
1759 SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
1760 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1761 0, 0, sysctl_aioprocmax, "I", "");
1762
1763 SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
1764 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1765 0, 0, sysctl_aiothreads, "I", "");
1766
1767 #if (DEVELOPMENT || DEBUG)
1768 extern int sched_smt_balance;
1769 SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance,
1770 CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
1771 &sched_smt_balance, 0, "");
1772 #endif
1773
1774 STATIC int
1775 sysctl_securelvl
1776 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1777 {
1778 int new_value, changed;
1779 int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
1780 if (changed) {
1781 if (!(new_value < securelevel && req->p->p_pid != 1)) {
1782 proc_list_lock();
1783 securelevel = new_value;
1784 proc_list_unlock();
1785 } else {
1786 error = EPERM;
1787 }
1788 }
1789 return(error);
1790 }
1791
1792 SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
1793 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1794 0, 0, sysctl_securelvl, "I", "");
1795
1796
1797 STATIC int
1798 sysctl_domainname
1799 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1800 {
1801 int error, changed;
1802 error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
1803 if (changed) {
1804 domainnamelen = strlen(domainname);
1805 }
1806 return(error);
1807 }
1808
1809 SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
1810 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
1811 0, 0, sysctl_domainname, "A", "");
1812
1813 SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
1814 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1815 &hostid, 0, "");
1816
1817 STATIC int
1818 sysctl_hostname
1819 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1820 {
1821 int error, changed;
1822 error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
1823 if (changed) {
1824 hostnamelen = req->newlen;
1825 }
1826 return(error);
1827 }
1828
1829
1830 SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
1831 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
1832 0, 0, sysctl_hostname, "A", "");
1833
1834 STATIC int
1835 sysctl_procname
1836 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1837 {
1838 /* The original code allowed writing; that behavior is preserved here even
1839 though its purpose is unclear. This sysctl appears to be unused. */
1840 return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
1841 }
1842
1843 SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
1844 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
1845 0, 0, sysctl_procname, "A", "");
1846
1847 SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
1848 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1849 &speculative_reads_disabled, 0, "");
1850
1851 SYSCTL_INT(_kern, OID_AUTO, ignore_is_ssd,
1852 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1853 &ignore_is_ssd, 0, "");
1854
1855 SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
1856 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1857 &preheat_max_bytes, 0, "");
1858
1859 SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
1860 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1861 &preheat_min_bytes, 0, "");
1862
1863 SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
1864 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1865 &speculative_prefetch_max, 0, "");
1866
1867 SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
1868 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1869 &speculative_prefetch_max_iosize, 0, "");
1870
1871 SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
1872 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1873 &vm_page_free_target, 0, "");
1874
1875 SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
1876 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1877 &vm_page_free_min, 0, "");
1878
1879 SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
1880 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1881 &vm_page_free_reserved, 0, "");
1882
1883 SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
1884 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1885 &vm_page_speculative_percentage, 0, "");
1886
1887 SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
1888 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1889 &vm_page_speculative_q_age_ms, 0, "");
1890
1891 SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
1892 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1893 &vm_max_delayed_work_limit, 0, "");
1894
1895 SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
1896 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1897 &vm_max_batch, 0, "");
1898
1899 SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
1900 CTLFLAG_RD | CTLFLAG_LOCKED,
1901 &bootsessionuuid_string, sizeof(bootsessionuuid_string), "");
1902
1903 STATIC int
1904 sysctl_boottime
1905 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1906 {
1907 struct timeval tv;
1908 boottime_timeval(&tv);
1909 struct proc *p = req->p;
1910
1911 if (proc_is64bit(p)) {
1912 struct user64_timeval t;
1913 t.tv_sec = tv.tv_sec;
1914 t.tv_usec = tv.tv_usec;
1915 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
1916 } else {
1917 struct user32_timeval t;
1918 t.tv_sec = tv.tv_sec;
1919 t.tv_usec = tv.tv_usec;
1920 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
1921 }
1922 }
1923
1924 SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
1925 CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
1926 0, 0, sysctl_boottime, "S,timeval", "");
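/*
 * Illustrative sketch (editorial, not part of xnu): the handler above sizes
 * its reply to the caller's ABI, so a userspace reader can request a native
 * struct timeval through the CTL_KERN/KERN_BOOTTIME MIB.
 *
 *	#include <sys/sysctl.h>
 *	#include <sys/time.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct timeval tv;
 *		size_t len = sizeof(tv);
 *		int mib[2] = { CTL_KERN, KERN_BOOTTIME };
 *
 *		if (sysctl(mib, 2, &tv, &len, NULL, 0) == 0)
 *			printf("booted at %ld.%06d\n", (long)tv.tv_sec, (int)tv.tv_usec);
 *		return 0;
 *	}
 */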
1927
1928 STATIC int
1929 sysctl_symfile
1930 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1931 {
1932 char *str;
1933 int error = get_kernel_symfile(req->p, &str);
1934 if (error)
1935 return (error);
1936 return sysctl_io_string(req, str, 0, 0, NULL);
1937 }
1938
1939
1940 SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
1941 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
1942 0, 0, sysctl_symfile, "A", "");
1943
1944 #if NFSCLIENT
1945 STATIC int
1946 sysctl_netboot
1947 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1948 {
1949 return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
1950 }
1951
1952 SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
1953 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
1954 0, 0, sysctl_netboot, "I", "");
1955 #endif
1956
1957 #ifdef CONFIG_IMGSRC_ACCESS
1958 /*
1959 * Legacy--act as if only one layer of nesting is possible.
1960 */
1961 STATIC int
1962 sysctl_imgsrcdev
1963 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1964 {
1965 vfs_context_t ctx = vfs_context_current();
1966 vnode_t devvp;
1967 int result;
1968
1969 if (!vfs_context_issuser(ctx)) {
1970 return EPERM;
1971 }
1972
1973 if (imgsrc_rootvnodes[0] == NULL) {
1974 return ENOENT;
1975 }
1976
1977 result = vnode_getwithref(imgsrc_rootvnodes[0]);
1978 if (result != 0) {
1979 return result;
1980 }
1981
1982 devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
1983 result = vnode_getwithref(devvp);
1984 if (result != 0) {
1985 goto out;
1986 }
1987
1988 result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);
1989
1990 vnode_put(devvp);
1991 out:
1992 vnode_put(imgsrc_rootvnodes[0]);
1993 return result;
1994 }
1995
1996 SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
1997 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
1998 0, 0, sysctl_imgsrcdev, "I", "");
1999
2000 STATIC int
2001 sysctl_imgsrcinfo
2002 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2003 {
2004 int error;
2005 struct imgsrc_info info[MAX_IMAGEBOOT_NESTING]; /* 2 for now, no problem */
2006 uint32_t i;
2007 vnode_t rvp, devvp;
2008
2009 if (imgsrc_rootvnodes[0] == NULLVP) {
2010 return ENXIO;
2011 }
2012
2013 for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
2014 /*
2015 * Go get the root vnode.
2016 */
2017 rvp = imgsrc_rootvnodes[i];
2018 if (rvp == NULLVP) {
2019 break;
2020 }
2021
2022 error = vnode_get(rvp);
2023 if (error != 0) {
2024 return error;
2025 }
2026
2027 /*
2028 * For now, no getting at a non-local volume.
2029 */
2030 devvp = vnode_mount(rvp)->mnt_devvp;
2031 if (devvp == NULL) {
2032 vnode_put(rvp);
2033 return EINVAL;
2034 }
2035
2036 error = vnode_getwithref(devvp);
2037 if (error != 0) {
2038 vnode_put(rvp);
2039 return error;
2040 }
2041
2042 /*
2043 * Fill in info.
2044 */
2045 info[i].ii_dev = vnode_specrdev(devvp);
2046 info[i].ii_flags = 0;
2047 info[i].ii_height = i;
2048 bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));
2049
2050 vnode_put(devvp);
2051 vnode_put(rvp);
2052 }
2053
2054 return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
2055 }
2056
2057 SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
2058 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
2059 0, 0, sysctl_imgsrcinfo, "I", "");
2060
2061 #endif /* CONFIG_IMGSRC_ACCESS */
2062
2063
2064 SYSCTL_DECL(_kern_timer);
2065 SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");
2066
2067
2068 SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
2069 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2070 &mach_timer_coalescing_enabled, 0, "");
2071
2072 SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
2073 CTLFLAG_RW | CTLFLAG_LOCKED,
2074 &timer_deadline_tracking_bin_1, "");
2075 SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
2076 CTLFLAG_RW | CTLFLAG_LOCKED,
2077 &timer_deadline_tracking_bin_2, "");
2078
2079 SYSCTL_DECL(_kern_timer_longterm);
2080 SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");
2081
2082
2083 /* Must match definition in osfmk/kern/timer_call.c */
2084 enum {
2085 THRESHOLD, QCOUNT,
2086 ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
2087 LATENCY, LATENCY_MIN, LATENCY_MAX
2088 };
2089 extern uint64_t timer_sysctl_get(int);
2090 extern int timer_sysctl_set(int, uint64_t);
2091
2092 STATIC int
2093 sysctl_timer
2094 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2095 {
2096 int oid = (int)arg1;
2097 uint64_t value = timer_sysctl_get(oid);
2098 uint64_t new_value;
2099 int error;
2100 int changed;
2101
2102 error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
2103 if (changed)
2104 error = timer_sysctl_set(oid, new_value);
2105
2106 return error;
2107 }
2108
2109 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
2110 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2111 (void *) THRESHOLD, 0, sysctl_timer, "Q", "");
2112 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
2113 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2114 (void *) QCOUNT, 0, sysctl_timer, "Q", "");
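/*
 * Illustrative sketch (editorial, not part of xnu): the long-term timer
 * sysctls above are registered as 64-bit ("Q") values, so userspace reads
 * them into a uint64_t; kern.timer.longterm.qlen is read-only, while
 * kern.timer.longterm.threshold may also be written.
 *
 *	#include <sys/sysctl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		uint64_t qlen = 0;
 *		size_t len = sizeof(qlen);
 *
 *		if (sysctlbyname("kern.timer.longterm.qlen", &qlen, &len, NULL, 0) == 0)
 *			printf("long-term timer queue length: %llu\n",
 *			    (unsigned long long)qlen);
 *		return 0;
 *	}
 */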
2115 #if DEBUG
2116 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
2117 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2118 (void *) ENQUEUES, 0, sysctl_timer, "Q", "");
2119 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
2120 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2121 (void *) DEQUEUES, 0, sysctl_timer, "Q", "");
2122 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
2123 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2124 (void *) ESCALATES, 0, sysctl_timer, "Q", "");
2125 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
2126 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2127 (void *) SCANS, 0, sysctl_timer, "Q", "");
2128 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
2129 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2130 (void *) PREEMPTS, 0, sysctl_timer, "Q", "");
2131 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
2132 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2133 (void *) LATENCY, 0, sysctl_timer, "Q", "");
2134 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
2135 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2136 (void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
2137 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
2138 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2139 (void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
2140 #endif /* DEBUG */
2141
2142 STATIC int
2143 sysctl_usrstack
2144 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2145 {
2146 return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
2147 }
2148
2149 SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
2150 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2151 0, 0, sysctl_usrstack, "I", "");
2152
2153 STATIC int
2154 sysctl_usrstack64
2155 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2156 {
2157 return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
2158 }
2159
2160 SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
2161 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2162 0, 0, sysctl_usrstack64, "Q", "");
2163
2164 #if CONFIG_COREDUMP
2165
2166 SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
2167 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2168 corefilename, sizeof(corefilename), "");
2169
2170 STATIC int
2171 sysctl_coredump
2172 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2173 {
2174 #ifdef SECURE_KERNEL
2175 (void)req;
2176 return (ENOTSUP);
2177 #else
2178 int new_value, changed;
2179 int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
2180 if (changed) {
2181 if ((new_value == 0) || (new_value == 1))
2182 do_coredump = new_value;
2183 else
2184 error = EINVAL;
2185 }
2186 return(error);
2187 #endif
2188 }
2189
2190 SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
2191 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2192 0, 0, sysctl_coredump, "I", "");
2193
2194 STATIC int
2195 sysctl_suid_coredump
2196 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2197 {
2198 #ifdef SECURE_KERNEL
2199 (void)req;
2200 return (ENOTSUP);
2201 #else
2202 int new_value, changed;
2203 int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
2204 if (changed) {
2205 if ((new_value == 0) || (new_value == 1))
2206 sugid_coredump = new_value;
2207 else
2208 error = EINVAL;
2209 }
2210 return(error);
2211 #endif
2212 }
2213
2214 SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
2215 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2216 0, 0, sysctl_suid_coredump, "I", "");
2217
2218 #endif /* CONFIG_COREDUMP */
2219
2220 STATIC int
2221 sysctl_delayterm
2222 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2223 {
2224 struct proc *p = req->p;
2225 int new_value, changed;
2226 int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
2227 if (changed) {
2228 proc_lock(p);
2229 if (new_value)
2230 req->p->p_lflag |= P_LDELAYTERM;
2231 else
2232 req->p->p_lflag &= ~P_LDELAYTERM;
2233 proc_unlock(p);
2234 }
2235 return(error);
2236 }
2237
2238 SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
2239 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2240 0, 0, sysctl_delayterm, "I", "");
2241
2242
2243 STATIC int
2244 sysctl_rage_vnode
2245 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2246 {
2247 struct proc *p = req->p;
2248 struct uthread *ut;
2249 int new_value, old_value, changed;
2250 int error;
2251
2252 ut = get_bsdthread_info(current_thread());
2253
2254 if (ut->uu_flag & UT_RAGE_VNODES)
2255 old_value = KERN_RAGE_THREAD;
2256 else if (p->p_lflag & P_LRAGE_VNODES)
2257 old_value = KERN_RAGE_PROC;
2258 else
2259 old_value = 0;
2260
2261 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2262
2263 if (error == 0) {
2264 switch (new_value) {
2265 case KERN_RAGE_PROC:
2266 proc_lock(p);
2267 p->p_lflag |= P_LRAGE_VNODES;
2268 proc_unlock(p);
2269 break;
2270 case KERN_UNRAGE_PROC:
2271 proc_lock(p);
2272 p->p_lflag &= ~P_LRAGE_VNODES;
2273 proc_unlock(p);
2274 break;
2275
2276 case KERN_RAGE_THREAD:
2277 ut->uu_flag |= UT_RAGE_VNODES;
2278 break;
2279 case KERN_UNRAGE_THREAD:
2280 ut = get_bsdthread_info(current_thread());
2281 ut->uu_flag &= ~UT_RAGE_VNODES;
2282 break;
2283 }
2284 }
2285 return(error);
2286 }
2287
2288 SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
2289 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2290 0, 0, sysctl_rage_vnode, "I", "");
2291
2292 /* XXX move this interface into libproc and remove this sysctl */
2293 STATIC int
2294 sysctl_setthread_cpupercent
2295 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2296 {
2297 int new_value, old_value;
2298 int error = 0;
2299 kern_return_t kret = KERN_SUCCESS;
2300 uint8_t percent = 0;
2301 int ms_refill = 0;
2302
2303 if (!req->newptr)
2304 return (0);
2305
2306 old_value = 0;
2307
2308 if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0)
2309 return (error);
2310
2311 percent = new_value & 0xff; /* low 8 bits for percent */
2312 ms_refill = (new_value >> 8) & 0xffffff; /* upper 24 bits represent the ms refill value */
2313 if (percent > 100)
2314 return (EINVAL);
2315
2316 /*
2317 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
2318 */
2319 if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0)
2320 return (EIO);
2321
2322 return (0);
2323 }
2324
2325 SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
2326 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
2327 0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
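/*
 * Illustrative sketch (editorial, not part of xnu): building an argument for
 * kern.setthread_cpupercent the way the handler above decodes it, with the
 * CPU percentage in the low 8 bits and the refill period, in milliseconds, in
 * the upper 24 bits. The 50%/100 ms figures are only example values.
 *
 *	#include <sys/sysctl.h>
 *
 *	static int
 *	limit_current_thread(int percent, int refill_ms)
 *	{
 *		int encoded = (refill_ms << 8) | (percent & 0xff);
 *
 *		return sysctlbyname("kern.setthread_cpupercent", NULL, NULL,
 *		    &encoded, sizeof(encoded));
 *	}
 *
 *	// e.g. limit_current_thread(50, 100);
 */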
2328
2329
2330 STATIC int
2331 sysctl_kern_check_openevt
2332 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2333 {
2334 struct proc *p = req->p;
2335 int new_value, old_value, changed;
2336 int error;
2337
2338 if (p->p_flag & P_CHECKOPENEVT) {
2339 old_value = KERN_OPENEVT_PROC;
2340 } else {
2341 old_value = 0;
2342 }
2343
2344 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2345
2346 if (error == 0) {
2347 switch (new_value) {
2348 case KERN_OPENEVT_PROC:
2349 OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
2350 break;
2351
2352 case KERN_UNOPENEVT_PROC:
2353 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
2354 break;
2355
2356 default:
2357 error = EINVAL;
2358 }
2359 }
2360 return(error);
2361 }
2362
2363 SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2364 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2365
2366
2367
2368 STATIC int
2369 sysctl_nx
2370 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2371 {
2372 #ifdef SECURE_KERNEL
2373 (void)req;
2374 return ENOTSUP;
2375 #else
2376 int new_value, changed;
2377 int error;
2378
2379 error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
2380 if (error)
2381 return error;
2382
2383 if (changed) {
2384 #if defined(__i386__) || defined(__x86_64__)
2385 /*
2386 * Only allow setting if NX is supported on the chip
2387 */
2388 if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
2389 return ENOTSUP;
2390 #endif
2391 nx_enabled = new_value;
2392 }
2393 return(error);
2394 #endif /* SECURE_KERNEL */
2395 }
2396
2397
2398
2399 SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
2400 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2401 0, 0, sysctl_nx, "I", "");
2402
2403 STATIC int
2404 sysctl_loadavg
2405 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2406 {
2407 if (proc_is64bit(req->p)) {
2408 struct user64_loadavg loadinfo64;
2409 fill_loadavg64(&averunnable, &loadinfo64);
2410 return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
2411 } else {
2412 struct user32_loadavg loadinfo32;
2413 fill_loadavg32(&averunnable, &loadinfo32);
2414 return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
2415 }
2416 }
2417
2418 SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
2419 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2420 0, 0, sysctl_loadavg, "S,loadavg", "");
2421
2422 /*
2423 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2424 */
2425 STATIC int
2426 sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
2427 __unused int arg2, struct sysctl_req *req)
2428 {
2429 int old_value=0, new_value=0, error=0;
2430
2431 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value ))
2432 return(error);
2433 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
2434 if (!error) {
2435 return (vm_toggle_entry_reuse(new_value, NULL));
2436 }
2437 return(error);
2438 }
2439
2440 SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse,"I","");
2441
2442
2443 STATIC int
2444 sysctl_swapusage
2445 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2446 {
2447 int error;
2448 uint64_t swap_total;
2449 uint64_t swap_avail;
2450 vm_size_t swap_pagesize;
2451 boolean_t swap_encrypted;
2452 struct xsw_usage xsu;
2453
2454 error = macx_swapinfo(&swap_total,
2455 &swap_avail,
2456 &swap_pagesize,
2457 &swap_encrypted);
2458 if (error)
2459 return error;
2460
2461 xsu.xsu_total = swap_total;
2462 xsu.xsu_avail = swap_avail;
2463 xsu.xsu_used = swap_total - swap_avail;
2464 xsu.xsu_pagesize = swap_pagesize;
2465 xsu.xsu_encrypted = swap_encrypted;
2466 return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
2467 }
2468
2469
2470
2471 SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
2472 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2473 0, 0, sysctl_swapusage, "S,xsw_usage", "");
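/*
 * Illustrative sketch (editorial, not part of xnu): reading vm.swapusage from
 * userspace into the struct xsw_usage that the handler above populates.
 *
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct xsw_usage xsu;
 *		size_t len = sizeof(xsu);
 *
 *		if (sysctlbyname("vm.swapusage", &xsu, &len, NULL, 0) == 0)
 *			printf("swap used: %llu of %llu bytes\n",
 *			    (unsigned long long)xsu.xsu_used,
 *			    (unsigned long long)xsu.xsu_total);
 *		return 0;
 *	}
 */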
2474
2475 #if CONFIG_FREEZE
2476 extern void vm_page_reactivate_all_throttled(void);
2477
2478 static int
2479 sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
2480 {
2481 #pragma unused(arg1, arg2)
2482 int error, val = memorystatus_freeze_enabled ? 1 : 0;
2483 boolean_t disabled;
2484
2485 error = sysctl_handle_int(oidp, &val, 0, req);
2486 if (error || !req->newptr)
2487 return (error);
2488
2489 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
2490 //assert(req->newptr);
2491 printf("Failed attempt to set vm.freeze_enabled sysctl\n");
2492 return EINVAL;
2493 }
2494
2495 /*
2496 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
2497 */
2498 disabled = (!val && memorystatus_freeze_enabled);
2499
2500 memorystatus_freeze_enabled = val ? TRUE : FALSE;
2501
2502 if (disabled) {
2503 vm_page_reactivate_all_throttled();
2504 }
2505
2506 return (0);
2507 }
2508
2509 SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
2510 #endif /* CONFIG_FREEZE */
2511
2512 /* this kernel does NOT implement shared_region_make_private_np() */
2513 SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
2514 CTLFLAG_RD | CTLFLAG_LOCKED,
2515 (int *)NULL, 0, "");
2516
2517 STATIC int
2518 fetch_process_cputype(
2519 proc_t cur_proc,
2520 int *name,
2521 u_int namelen,
2522 cpu_type_t *cputype)
2523 {
2524 proc_t p = PROC_NULL;
2525 int refheld = 0;
2526 cpu_type_t ret = 0;
2527 int error = 0;
2528
2529 if (namelen == 0)
2530 p = cur_proc;
2531 else if (namelen == 1) {
2532 p = proc_find(name[0]);
2533 if (p == NULL)
2534 return (EINVAL);
2535 refheld = 1;
2536 } else {
2537 error = EINVAL;
2538 goto out;
2539 }
2540
2541 ret = cpu_type() & ~CPU_ARCH_MASK;
2542 if (IS_64BIT_PROCESS(p))
2543 ret |= CPU_ARCH_ABI64;
2544
2545 *cputype = ret;
2546
2547 if (refheld != 0)
2548 proc_rele(p);
2549 out:
2550 return (error);
2551 }
2552
2553 STATIC int
2554 sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2555 struct sysctl_req *req)
2556 {
2557 int error;
2558 cpu_type_t proc_cputype = 0;
2559 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2560 return error;
2561 int res = 1;
2562 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
2563 res = 0;
2564 return SYSCTL_OUT(req, &res, sizeof(res));
2565 }
2566 SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native ,"I","proc_native");
2567
2568 STATIC int
2569 sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2570 struct sysctl_req *req)
2571 {
2572 int error;
2573 cpu_type_t proc_cputype = 0;
2574 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2575 return error;
2576 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
2577 }
2578 SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
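/*
 * Illustrative sketch (editorial, not part of xnu): sysctl.proc_cputype is a
 * node that expects a pid as a trailing MIB element, so userspace resolves
 * the name first and then appends the pid of interest (the caller's own pid
 * here, purely as an example).
 *
 *	#include <sys/sysctl.h>
 *	#include <mach/machine.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[CTL_MAXNAME];
 *		size_t miblen = CTL_MAXNAME;
 *		cpu_type_t type = 0;
 *		size_t len = sizeof(type);
 *
 *		if (sysctlnametomib("sysctl.proc_cputype", mib, &miblen) != 0)
 *			return 1;
 *		mib[miblen] = getpid();
 *		if (sysctl(mib, (u_int)(miblen + 1), &type, &len, NULL, 0) == 0)
 *			printf("cputype: 0x%x\n", (unsigned int)type);
 *		return 0;
 *	}
 */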
2579
2580 STATIC int
2581 sysctl_safeboot
2582 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2583 {
2584 return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
2585 }
2586
2587 SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
2588 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2589 0, 0, sysctl_safeboot, "I", "");
2590
2591 STATIC int
2592 sysctl_singleuser
2593 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2594 {
2595 return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
2596 }
2597
2598 SYSCTL_PROC(_kern, OID_AUTO, singleuser,
2599 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2600 0, 0, sysctl_singleuser, "I", "");
2601
2602 STATIC int sysctl_minimalboot
2603 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2604 {
2605 return sysctl_io_number(req, minimalboot, sizeof(int), NULL, NULL);
2606 }
2607
2608 SYSCTL_PROC(_kern, OID_AUTO, minimalboot,
2609 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2610 0, 0, sysctl_minimalboot, "I", "");
2611
2612 /*
2613 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
2614 */
2615 extern boolean_t affinity_sets_enabled;
2616 extern int affinity_sets_mapping;
2617
2618 SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
2619 CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
2620 SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
2621 CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
2622
2623 /*
2624 * Boolean indicating if KASLR is active.
2625 */
2626 STATIC int
2627 sysctl_slide
2628 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2629 {
2630 uint32_t slide;
2631
2632 slide = vm_kernel_slide ? 1 : 0;
2633
2634 return sysctl_io_number( req, slide, sizeof(int), NULL, NULL);
2635 }
2636
2637 SYSCTL_PROC(_kern, OID_AUTO, slide,
2638 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2639 0, 0, sysctl_slide, "I", "");
2640
2641 /*
2642 * Limit on total memory users can wire.
2643 *
2644 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
2645 *
2646 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
2647 *
2648 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
2649 * kmem_init().
2650 *
2651 * All values are in bytes.
2652 */
2653
2654 vm_map_size_t vm_global_no_user_wire_amount;
2655 vm_map_size_t vm_global_user_wire_limit;
2656 vm_map_size_t vm_user_wire_limit;
2657
2658 /*
2659 * There needs to be a more automatic/elegant way to do this
2660 */
2661 SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
2662 SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
2663 SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
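/*
 * Illustrative sketch (editorial, not part of xnu): the wire limits above are
 * exported as 64-bit byte counts, so userspace reads them as uint64_t.
 *
 *	#include <sys/sysctl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		uint64_t limit = 0;
 *		size_t len = sizeof(limit);
 *
 *		if (sysctlbyname("vm.user_wire_limit", &limit, &len, NULL, 0) == 0)
 *			printf("per-address-space wire limit: %llu bytes\n",
 *			    (unsigned long long)limit);
 *		return 0;
 *	}
 */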
2664
2665 extern int vm_map_copy_overwrite_aligned_src_not_internal;
2666 extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
2667 extern int vm_map_copy_overwrite_aligned_src_large;
2668 SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
2669 SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
2670 SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");
2671
2672
2673 extern uint32_t vm_page_external_count;
2674 extern uint32_t vm_page_filecache_min;
2675
2676 SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");
2677 SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_filecache_min, 0, "");
2678
2679 extern int vm_compressor_mode;
2680 extern int vm_compressor_is_active;
2681 extern int vm_compressor_available;
2682 extern uint32_t vm_ripe_target_age;
2683 extern uint32_t swapout_target_age;
2684 extern int64_t compressor_bytes_used;
2685 extern int64_t c_segment_input_bytes;
2686 extern int64_t c_segment_compressed_bytes;
2687 extern uint32_t compressor_eval_period_in_msecs;
2688 extern uint32_t compressor_sample_min_in_msecs;
2689 extern uint32_t compressor_sample_max_in_msecs;
2690 extern uint32_t compressor_thrashing_threshold_per_10msecs;
2691 extern uint32_t compressor_thrashing_min_per_10msecs;
2692 extern uint32_t vm_compressor_minorcompact_threshold_divisor;
2693 extern uint32_t vm_compressor_majorcompact_threshold_divisor;
2694 extern uint32_t vm_compressor_unthrottle_threshold_divisor;
2695 extern uint32_t vm_compressor_catchup_threshold_divisor;
2696 extern uint32_t vm_compressor_time_thread;
2697 extern uint64_t vm_compressor_thread_runtime;
2698
2699 SYSCTL_QUAD(_vm, OID_AUTO, compressor_input_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_input_bytes, "");
2700 SYSCTL_QUAD(_vm, OID_AUTO, compressor_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_compressed_bytes, "");
2701 SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");
2702
2703 SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
2704 SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
2705 SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
2706 SYSCTL_INT(_vm, OID_AUTO, compressor_available, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_available, 0, "");
2707
2708 SYSCTL_INT(_vm, OID_AUTO, vm_ripe_target_age_in_secs, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ripe_target_age, 0, "");
2709
2710 SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
2711 SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
2712 SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
2713 SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
2714 SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");
2715 SYSCTL_INT(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_minorcompact_threshold_divisor, 0, "");
2716 SYSCTL_INT(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_majorcompact_threshold_divisor, 0, "");
2717 SYSCTL_INT(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_unthrottle_threshold_divisor, 0, "");
2718 SYSCTL_INT(_vm, OID_AUTO, compressor_catchup_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_catchup_threshold_divisor, 0, "");
2719
2720 SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");
2721
2722 SYSCTL_INT(_vm, OID_AUTO, compressor_timing_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_time_thread, 0, "");
2723 SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_thread_runtime, "");
2724
2725 SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressions, "");
2726 SYSCTL_QUAD(_vm, OID_AUTO, lz4_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compression_failures, "");
2727 SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressed_bytes, "");
2728 SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_delta, "");
2729 SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_negative_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_negative_delta, "");
2730
2731 SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressions, "");
2732 SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressed_bytes, "");
2733
2734 SYSCTL_QUAD(_vm, OID_AUTO, uc_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.uc_decompressions, "");
2735
2736 SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions, "");
2737 SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions_exclusive, "");
2738 SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_compressions, "");
2739 SYSCTL_QUAD(_vm, OID_AUTO, wk_mzv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_mzv_compressions, "");
2740 SYSCTL_QUAD(_vm, OID_AUTO, wk_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compression_failures, "");
2741 SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_exclusive, "");
2742 SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_total, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_total, "");
2743
2744 SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressions, "");
2745 SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressed_bytes, "");
2746 SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_decompressions, "");
2747
2748 SYSCTL_INT(_vm, OID_AUTO, lz4_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_threshold, 0, "");
2749 SYSCTL_INT(_vm, OID_AUTO, wkdm_reeval_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.wkdm_reeval_threshold, 0, "");
2750 SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_skips, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_skips, 0, "");
2751 SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_run_length, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_run_length, 0, "");
2752 SYSCTL_INT(_vm, OID_AUTO, lz4_max_preselects, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_preselects, 0, "");
2753 SYSCTL_INT(_vm, OID_AUTO, lz4_run_preselection_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_preselection_threshold, 0, "");
2754 SYSCTL_INT(_vm, OID_AUTO, lz4_run_continue_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_continue_bytes, 0, "");
2755 SYSCTL_INT(_vm, OID_AUTO, lz4_profitable_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_profitable_bytes, 0, "");
2756
2757 #if CONFIG_PHANTOM_CACHE
2758 extern uint32_t phantom_cache_thrashing_threshold;
2759 extern uint32_t phantom_cache_eval_period_in_msecs;
2760 extern uint32_t phantom_cache_thrashing_threshold_ssd;
2761
2762
2763 SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
2764 SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
2765 SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
2766 #endif
2767
2768 #if CONFIG_BACKGROUND_QUEUE
2769
2770 extern uint32_t vm_page_background_count;
2771 extern uint32_t vm_page_background_limit;
2772 extern uint32_t vm_page_background_target;
2773 extern uint32_t vm_page_background_internal_count;
2774 extern uint32_t vm_page_background_external_count;
2775 extern uint32_t vm_page_background_mode;
2776 extern uint32_t vm_page_background_exclude_external;
2777 extern uint64_t vm_page_background_promoted_count;
2778 extern uint64_t vm_pageout_considered_bq_internal;
2779 extern uint64_t vm_pageout_considered_bq_external;
2780 extern uint64_t vm_pageout_rejected_bq_internal;
2781 extern uint64_t vm_pageout_rejected_bq_external;
2782
2783 SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
2784 SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
2785 SYSCTL_INT(_vm, OID_AUTO, vm_page_background_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_limit, 0, "");
2786 SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
2787 SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
2788 SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
2789 SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");
2790
2791 SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
2792 SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_internal, "");
2793 SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_external, "");
2794 SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
2795 SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");
2796
2797 #endif
2798
2799 #if (DEVELOPMENT || DEBUG)
2800
2801 SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
2802 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2803 &vm_page_creation_throttled_hard, 0, "");
2804
2805 SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
2806 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2807 &vm_page_creation_throttled_soft, 0, "");
2808
2809 extern uint32_t vm_pageout_memorystatus_fb_factor_nr;
2810 extern uint32_t vm_pageout_memorystatus_fb_factor_dr;
2811 SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_nr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_nr, 0, "");
2812 SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_dr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_dr, 0, "");
2813
2814 extern uint32_t vm_grab_anon_overrides;
2815 extern uint32_t vm_grab_anon_nops;
2816
2817 SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_overrides, 0, "");
2818 SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_nops, 0, "");
2819
2820 /* log message counters for persistence mode */
2821 extern uint32_t oslog_p_total_msgcount;
2822 extern uint32_t oslog_p_metadata_saved_msgcount;
2823 extern uint32_t oslog_p_metadata_dropped_msgcount;
2824 extern uint32_t oslog_p_error_count;
2825 extern uint32_t oslog_p_saved_msgcount;
2826 extern uint32_t oslog_p_dropped_msgcount;
2827 extern uint32_t oslog_p_boot_dropped_msgcount;
2828
2829 /* log message counters for streaming mode */
2830 extern uint32_t oslog_s_total_msgcount;
2831 extern uint32_t oslog_s_metadata_msgcount;
2832 extern uint32_t oslog_s_error_count;
2833 extern uint32_t oslog_s_streamed_msgcount;
2834 extern uint32_t oslog_s_dropped_msgcount;
2835
2836 SYSCTL_UINT(_debug, OID_AUTO, oslog_p_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_total_msgcount, 0, "");
2837 SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_saved_msgcount, 0, "");
2838 SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_dropped_msgcount, 0, "");
2839 SYSCTL_UINT(_debug, OID_AUTO, oslog_p_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_error_count, 0, "");
2840 SYSCTL_UINT(_debug, OID_AUTO, oslog_p_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_saved_msgcount, 0, "");
2841 SYSCTL_UINT(_debug, OID_AUTO, oslog_p_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_dropped_msgcount, 0, "");
2842 SYSCTL_UINT(_debug, OID_AUTO, oslog_p_boot_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_boot_dropped_msgcount, 0, "");
2843
2844 SYSCTL_UINT(_debug, OID_AUTO, oslog_s_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_total_msgcount, 0, "");
2845 SYSCTL_UINT(_debug, OID_AUTO, oslog_s_metadata_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_metadata_msgcount, 0, "");
2846 SYSCTL_UINT(_debug, OID_AUTO, oslog_s_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_error_count, 0, "");
2847 SYSCTL_UINT(_debug, OID_AUTO, oslog_s_streamed_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_streamed_msgcount, 0, "");
2848 SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_dropped_msgcount, 0, "");
2849
2850
2851 #endif /* DEVELOPMENT || DEBUG */
2852
2853 /*
2854 * Enable tracing of voucher contents
2855 */
2856 extern uint32_t ipc_voucher_trace_contents;
2857
2858 SYSCTL_INT (_kern, OID_AUTO, ipc_voucher_trace_contents,
2859 CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");
2860
2861 /*
2862 * Kernel stack size and depth
2863 */
2864 SYSCTL_INT (_kern, OID_AUTO, stack_size,
2865 CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
2866 SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
2867 CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
2868
2869 /*
2870 * Enable backtraces for port allocations
2871 */
2872 extern int ipc_portbt;
2873
2874 SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
2875 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2876 &ipc_portbt, 0, "");
2877
2878 /*
2879 * Scheduler sysctls
2880 */
2881
2882 SYSCTL_STRING(_kern, OID_AUTO, sched,
2883 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2884 sched_string, sizeof(sched_string),
2885 "Timeshare scheduler implementation");
2886
2887 /*
2888 * Only support runtime modification on embedded platforms
2889 * with development config enabled
2890 */
2891
2892
2893 /* Parameters related to timer coalescing tuning, to be replaced
2894 * with a dedicated system call in the future.
2895 */
2896 /* Enable processing of pending timers in the context of any other interrupt.
2897 * Coalescing tuning parameters for various thread/task attributes follow. */
2898 STATIC int
2899 sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
2900 {
2901 #pragma unused(oidp)
2902 int size = arg2; /* size, in bytes, of the backing abstime value */
2903 int error;
2904 int changed = 0;
2905 uint64_t old_value_ns;
2906 uint64_t new_value_ns;
2907 uint64_t value_abstime;
2908 if (size == sizeof(uint32_t))
2909 value_abstime = *((uint32_t *)arg1);
2910 else if (size == sizeof(uint64_t))
2911 value_abstime = *((uint64_t *)arg1);
2912 else return ENOTSUP;
2913
2914 absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
2915 error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
2916 if ((error) || (!changed))
2917 return error;
2918
2919 nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
2920 if (size == sizeof(uint32_t))
2921 *((uint32_t *)arg1) = (uint32_t)value_abstime;
2922 else
2923 *((uint64_t *)arg1) = value_abstime;
2924 return error;
2925 }
2926
2927 SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
2928 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2929 &tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
2930 SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
2931 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2932 &tcoal_prio_params.timer_resort_threshold_abstime,
2933 sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
2934 sysctl_timer_user_us_kernel_abstime,
2935 "Q", "");
2936 SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
2937 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2938 &tcoal_prio_params.timer_coalesce_bg_abstime_max,
2939 sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
2940 sysctl_timer_user_us_kernel_abstime,
2941 "Q", "");
2942
2943 SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
2944 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2945 &tcoal_prio_params.timer_coalesce_kt_shift, 0, "");
2946
2947 SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
2948 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2949 &tcoal_prio_params.timer_coalesce_kt_abstime_max,
2950 sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
2951 sysctl_timer_user_us_kernel_abstime,
2952 "Q", "");
2953
2954 SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
2955 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2956 &tcoal_prio_params.timer_coalesce_fp_shift, 0, "");
2957
2958 SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
2959 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2960 &tcoal_prio_params.timer_coalesce_fp_abstime_max,
2961 sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
2962 sysctl_timer_user_us_kernel_abstime,
2963 "Q", "");
2964
2965 SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
2966 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2967 &tcoal_prio_params.timer_coalesce_ts_shift, 0, "");
2968
2969 SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
2970 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2971 &tcoal_prio_params.timer_coalesce_ts_abstime_max,
2972 sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
2973 sysctl_timer_user_us_kernel_abstime,
2974 "Q", "");
2975
2976 SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
2977 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2978 &tcoal_prio_params.latency_qos_scale[0], 0, "");
2979
2980 SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
2981 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2982 &tcoal_prio_params.latency_qos_abstime_max[0],
2983 sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
2984 sysctl_timer_user_us_kernel_abstime,
2985 "Q", "");
2986
2987 SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
2988 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2989 &tcoal_prio_params.latency_qos_scale[1], 0, "");
2990
2991 SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
2992 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2993 &tcoal_prio_params.latency_qos_abstime_max[1],
2994 sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
2995 sysctl_timer_user_us_kernel_abstime,
2996 "Q", "");
2997
2998 SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
2999 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3000 &tcoal_prio_params.latency_qos_scale[2], 0, "");
3001
3002 SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
3003 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3004 &tcoal_prio_params.latency_qos_abstime_max[2],
3005 sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
3006 sysctl_timer_user_us_kernel_abstime,
3007 "Q", "");
3008
3009 SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
3010 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3011 &tcoal_prio_params.latency_qos_scale[3], 0, "");
3012
3013 SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
3014 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3015 &tcoal_prio_params.latency_qos_abstime_max[3],
3016 sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
3017 sysctl_timer_user_us_kernel_abstime,
3018 "Q", "");
3019
3020 SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
3021 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3022 &tcoal_prio_params.latency_qos_scale[4], 0, "");
3023
3024 SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
3025 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3026 &tcoal_prio_params.latency_qos_abstime_max[4],
3027 sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
3028 sysctl_timer_user_us_kernel_abstime,
3029 "Q", "");
3030
3031 SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
3032 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3033 &tcoal_prio_params.latency_qos_scale[5], 0, "");
3034
3035 SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
3036 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
3037 &tcoal_prio_params.latency_qos_abstime_max[5],
3038 sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
3039 sysctl_timer_user_us_kernel_abstime,
3040 "Q", "");
3041
3042 /* Communicate the "user idle level" heuristic to the timer layer, and
3043 * potentially other layers in the future.
3044 */
3045
3046 static int
3047 timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
3048 int new_value = 0, old_value = 0, changed = 0, error;
3049
3050 old_value = timer_get_user_idle_level();
3051
3052 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
3053
3054 if (error == 0 && changed) {
3055 if (timer_set_user_idle_level(new_value) != KERN_SUCCESS)
3056 error = ERANGE;
3057 }
3058
3059 return error;
3060 }
3061
3062 SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level,
3063 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
3064 0, 0,
3065 timer_user_idle_level, "I", "User idle level heuristic, 0-128");
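/*
 * Illustrative sketch (editorial, not part of xnu): publishing a user idle
 * level from userspace. Per the description string above, valid values run
 * from 0 to 128; the 64 used here is only an example.
 *
 *	#include <sys/sysctl.h>
 *
 *	static int
 *	publish_idle_level(int level)
 *	{
 *		return sysctlbyname("machdep.user_idle_level", NULL, NULL,
 *		    &level, sizeof(level));
 *	}
 *
 *	// e.g. publish_idle_level(64);
 */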
3066
3067 #if HYPERVISOR
3068 SYSCTL_INT(_kern, OID_AUTO, hv_support,
3069 CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
3070 &hv_support_available, 0, "");
3071 #endif
3072
3073
3074 /*
3075 * This is set by core audio to tell tailspin (i.e. background tracing) how long
3076 * its smallest buffer is. Background tracing can then make reasonable
3077 * decisions to avoid introducing so much latency that the buffers will
3078 * underflow.
3079 */
3080
3081 int min_audio_buffer_usec;
3082
3083 STATIC int
3084 sysctl_audio_buffer SYSCTL_HANDLER_ARGS
3085 {
3086 #pragma unused(oidp, arg1, arg2)
3087 int err = 0, value = 0, changed = 0;
3088 err = sysctl_io_number(req, min_audio_buffer_usec, sizeof(int), &value, &changed);
3089 if (err) goto exit;
3090
3091 if (changed) {
3092 /* writing is protected by an entitlement */
3093 if (priv_check_cred(kauth_cred_get(), PRIV_AUDIO_LATENCY, 0) != 0) {
3094 err = EPERM;
3095 goto exit;
3096 }
3097 min_audio_buffer_usec = value;
3098 }
3099 exit:
3100 return err;
3101 }
3102
3103 SYSCTL_PROC(_kern, OID_AUTO, min_audio_buffer_usec, CTLFLAG_RW | CTLFLAG_ANYBODY, 0, 0, sysctl_audio_buffer, "I", "Minimum audio buffer size, in microseconds");
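/*
 * Illustrative sketch (editorial, not part of xnu): any process may read
 * kern.min_audio_buffer_usec, while writes are gated by the PRIV_AUDIO_LATENCY
 * check in the handler above.
 *
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int usec = 0;
 *		size_t len = sizeof(usec);
 *
 *		if (sysctlbyname("kern.min_audio_buffer_usec", &usec, &len, NULL, 0) == 0)
 *			printf("smallest audio buffer: %d usec\n", usec);
 *		return 0;
 *	}
 */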
3104
3105 #if DEVELOPMENT || DEBUG
3106 #include <sys/sysent.h>
3107 /* This should result in a fatal exception, verifying that "sysent" is
3108 * write-protected.
3109 */
3110 static int
3111 kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
3112 uint64_t new_value = 0, old_value = 0;
3113 int changed = 0, error;
3114
3115 error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
3116 if ((error == 0) && changed) {
3117 volatile uint32_t *wraddr = (uint32_t *) &sysent[0];
3118 *wraddr = 0;
3119 printf("sysent[0] write succeeded\n");
3120 }
3121 return error;
3122 }
3123
3124 SYSCTL_PROC(_kern, OID_AUTO, sysent_const_check,
3125 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
3126 0, 0,
3127 kern_sysent_write, "I", "Attempt sysent[0] write");
3128
3129 #endif
3130
3131 #if DEVELOPMENT || DEBUG
3132 SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 1, "");
3133 #else
3134 SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 0, "");
3135 #endif