/*
 * Source: apple/xnu (tag xnu-3789.70.16), bsd/kern/kern_sysctl.c
 */
1 /*
2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
66 /*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
72
73 /*
74 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
84 */
85
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
95 #include <sys/buf.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
98 #include <sys/tty.h>
99 #include <sys/disklabel.h>
100 #include <sys/vm.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105 #include <sys/memory_maintenance.h>
106 #include <sys/priv.h>
107
108 #include <security/audit/audit.h>
109 #include <kern/kalloc.h>
110
111 #include <mach/machine.h>
112 #include <mach/mach_host.h>
113 #include <mach/mach_types.h>
114 #include <mach/vm_param.h>
115 #include <kern/mach_param.h>
116 #include <kern/task.h>
117 #include <kern/thread.h>
118 #include <kern/processor.h>
119 #include <kern/debug.h>
120 #include <kern/sched_prim.h>
121 #include <vm/vm_kern.h>
122 #include <vm/vm_map.h>
123 #include <mach/host_info.h>
124
125 #include <sys/mount_internal.h>
126 #include <sys/kdebug.h>
127
128 #include <IOKit/IOPlatformExpert.h>
129 #include <pexpert/pexpert.h>
130
131 #include <machine/machine_routines.h>
132 #include <machine/exec.h>
133
134 #include <vm/vm_protos.h>
135 #include <vm/vm_pageout.h>
136 #include <vm/vm_compressor_algorithms.h>
137 #include <sys/imgsrc.h>
138 #include <kern/timer_call.h>
139
140 #if defined(__i386__) || defined(__x86_64__)
141 #include <i386/cpuid.h>
142 #endif
143
144 #if CONFIG_FREEZE
145 #include <sys/kern_memorystatus.h>
146 #endif
147
148 #if KPERF
149 #include <kperf/kperf.h>
150 #endif
151
152 #if HYPERVISOR
153 #include <kern/hv_support.h>
154 #endif
155
156 /*
157 * deliberately setting max requests to really high number
158 * so that runaway settings do not cause MALLOC overflows
159 */
160 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
161
162 extern int aio_max_requests;
163 extern int aio_max_requests_per_process;
164 extern int aio_worker_threads;
165 extern int lowpri_IO_window_msecs;
166 extern int lowpri_IO_delay_msecs;
167 extern int nx_enabled;
168 extern int speculative_reads_disabled;
169 extern int ignore_is_ssd;
170 extern unsigned int speculative_prefetch_max;
171 extern unsigned int speculative_prefetch_max_iosize;
172 extern unsigned int preheat_max_bytes;
173 extern unsigned int preheat_min_bytes;
174 extern long numvnodes;
175
176 extern uuid_string_t bootsessionuuid_string;
177
178 extern unsigned int vm_max_delayed_work_limit;
179 extern unsigned int vm_max_batch;
180
181 extern unsigned int vm_page_free_min;
182 extern unsigned int vm_page_free_target;
183 extern unsigned int vm_page_free_reserved;
184 extern unsigned int vm_page_speculative_percentage;
185 extern unsigned int vm_page_speculative_q_age_ms;
186
187 #if (DEVELOPMENT || DEBUG)
188 extern uint32_t vm_page_creation_throttled_hard;
189 extern uint32_t vm_page_creation_throttled_soft;
190 #endif /* DEVELOPMENT || DEBUG */
191
192 /*
193 * Conditionally allow dtrace to see these functions for debugging purposes.
194 */
195 #ifdef STATIC
196 #undef STATIC
197 #endif
198 #if 0
199 #define STATIC
200 #else
201 #define STATIC static
202 #endif
203
204 extern boolean_t mach_timer_coalescing_enabled;
205
206 extern uint64_t timer_deadline_tracking_bin_1, timer_deadline_tracking_bin_2;
207
208 STATIC void
209 fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
210 STATIC void
211 fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
212 STATIC void
213 fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
214 STATIC void
215 fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
216 STATIC void
217 fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
218 STATIC void
219 fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);
220
221 extern int
222 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
223 #if NFSCLIENT
224 extern int
225 netboot_root(void);
226 #endif
227 int
228 pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
229 proc_t p);
230 __private_extern__ kern_return_t
231 reset_vmobjectcache(unsigned int val1, unsigned int val2);
232 int
233 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
234 size_t *sizep, proc_t cur_proc);
235 STATIC int
236 sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
237 proc_t cur_proc, int argc_yes);
238 int
239 sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
240 size_t newlen, void *sp, int len);
241
242 STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
243 STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
244 STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
245 STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
246 STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
247 int sysdoproc_callback(proc_t p, void *arg);
248
249
250 /* forward declarations for non-static STATIC */
251 STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
252 STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
253 STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
254 STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
255 STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
256 STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
257 #if COUNT_SYSCALLS
258 STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
259 #endif /* COUNT_SYSCALLS */
260 STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
261 STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
262 STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
263 STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
264 STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
265 STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
266 STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
267 STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
268 STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
269 STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
270 STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
271 STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
272 STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
273 STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
274 STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
275 STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
276 #if NFSCLIENT
277 STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
278 #endif
279 #ifdef CONFIG_IMGSRC_ACCESS
280 STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
281 #endif
282 STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
283 STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
284 #if CONFIG_COREDUMP
285 STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
286 STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
287 #endif
288 STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
289 STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
290 STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
291 STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
292 STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
293 STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
294 STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
295 STATIC int fetch_process_cputype( proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
296 STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
297 STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
298 STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
299 STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
300 STATIC int sysctl_minimalboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
301 STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
302
303
304 extern void IORegistrySetOSBuildVersion(char * build_version);
305
306 STATIC void
307 fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
308 {
309 la64->ldavg[0] = la->ldavg[0];
310 la64->ldavg[1] = la->ldavg[1];
311 la64->ldavg[2] = la->ldavg[2];
312 la64->fscale = (user64_long_t)la->fscale;
313 }
314
315 STATIC void
316 fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
317 {
318 la32->ldavg[0] = la->ldavg[0];
319 la32->ldavg[1] = la->ldavg[1];
320 la32->ldavg[2] = la->ldavg[2];
321 la32->fscale = (user32_long_t)la->fscale;
322 }
323
324 #if CONFIG_COREDUMP
325 /*
326 * Attributes stored in the kernel.
327 */
328 extern char corefilename[MAXPATHLEN+1];
329 extern int do_coredump;
330 extern int sugid_coredump;
331 #endif
332
333 #if COUNT_SYSCALLS
334 extern int do_count_syscalls;
335 #endif
336
#ifdef INSECURE
/* INSECURE build: start at -1 (traditionally pins the system insecure) */
int securelevel = -1;
#else
/* BSD-style securelevel; starts at 0 — raised/read via sysctl_securelvl */
int securelevel;
#endif
342
/*
 * sysctl handler for kern.threadname: get or set the name of the
 * calling thread (the uthread's pth_name).
 *
 * Read side: reports the current name length (MAXTHREADNAMESIZE - 1 if
 * no name has been set yet) and, when an output buffer was supplied,
 * copies the name out WITHOUT its NUL terminator (strlen semantics).
 *
 * Write side: validates the new length, lazily kalloc()s the
 * MAXTHREADNAMESIZE buffer on first use, and copies the new name in.
 */
STATIC int
sysctl_handle_kern_threadname( __unused struct sysctl_oid *oidp, __unused void *arg1,
	  __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct uthread *ut = get_bsdthread_info(current_thread());
	user_addr_t oldp=0, newp=0;
	size_t *oldlenp=NULL;
	size_t newlen=0;

	/* decompose the sysctl request into old-style oldp/newp parameters */
	oldp = req->oldptr;
	oldlenp = &(req->oldlen);
	newp = req->newptr;
	newlen = req->newlen;

	/* We want the current length, and maybe the string itself */
	if(oldlenp) {
		/* if we have no thread name yet tell them we want MAXTHREADNAMESIZE - 1 */
		size_t currlen = MAXTHREADNAMESIZE - 1;

		if(ut->pth_name)
			/* use length of current thread name */
			currlen = strlen(ut->pth_name);
		if(oldp) {
			/* caller's buffer must hold the whole name (no truncation) */
			if(*oldlenp < currlen)
				return ENOMEM;
			/* NOTE - we do not copy the NULL terminator */
			if(ut->pth_name) {
				error = copyout(ut->pth_name,oldp,currlen);
				if(error)
					return error;
			}
		}
		/* return length of thread name minus NULL terminator (just like strlen) */
		req->oldidx = currlen;
	}

	/* We want to set the name to something */
	if(newp)
	{
		/* must leave room for the NUL terminator in the fixed buffer */
		if(newlen > (MAXTHREADNAMESIZE - 1))
			return ENAMETOOLONG;
		if(!ut->pth_name)
		{
			/* first name for this thread: allocate the backing buffer */
			ut->pth_name = (char*)kalloc( MAXTHREADNAMESIZE );
			if(!ut->pth_name)
				return ENOMEM;
		} else {
			/* record the name being replaced in the trace stream */
			kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, ut->pth_name);
		}
		/* clear first so any copyin shorter than the buffer leaves a NUL-terminated string */
		bzero(ut->pth_name, MAXTHREADNAMESIZE);
		error = copyin(newp, ut->pth_name, newlen);
		if (error) {
			return error;
		}

		kernel_debug_string_simple(TRACE_STRING_THREADNAME, ut->pth_name);
	}

	return 0;
}
404
405 SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname,"A","");
406
407 #define BSD_HOST 1
408 STATIC int
409 sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
410 {
411 host_basic_info_data_t hinfo;
412 kern_return_t kret;
413 uint32_t size;
414 int changed;
415 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
416 struct _processor_statistics_np *buf;
417 int error;
418
419 kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
420 if (kret != KERN_SUCCESS) {
421 return EINVAL;
422 }
423
424 size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */
425
426 if (req->oldlen < size) {
427 return EINVAL;
428 }
429
430 MALLOC(buf, struct _processor_statistics_np*, size, M_TEMP, M_ZERO | M_WAITOK);
431
432 kret = get_sched_statistics(buf, &size);
433 if (kret != KERN_SUCCESS) {
434 error = EINVAL;
435 goto out;
436 }
437
438 error = sysctl_io_opaque(req, buf, size, &changed);
439 if (error) {
440 goto out;
441 }
442
443 if (changed) {
444 panic("Sched info changed?!");
445 }
446 out:
447 FREE(buf, M_TEMP);
448 return error;
449 }
450
451 SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");
452
453 STATIC int
454 sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
455 {
456 boolean_t active;
457 int res;
458
459 if (req->newlen != sizeof(active)) {
460 return EINVAL;
461 }
462
463 res = copyin(req->newptr, &active, sizeof(active));
464 if (res != 0) {
465 return res;
466 }
467
468 return set_sched_stats_active(active);
469 }
470
471 SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
472
473 extern uint32_t sched_debug_flags;
474 SYSCTL_INT(_debug, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_debug_flags, 0, "scheduler debug");
475
476 #if (DEBUG || DEVELOPMENT)
477 extern boolean_t doprnt_hide_pointers;
478 SYSCTL_INT(_debug, OID_AUTO, hide_kernel_pointers, CTLFLAG_RW | CTLFLAG_LOCKED, &doprnt_hide_pointers, 0, "hide kernel pointers from log");
479 #endif
480
481 extern int get_kernel_symfile(proc_t, char **);
482
483 #if COUNT_SYSCALLS
484 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
485
486 extern unsigned int nsysent;
487 extern int syscalls_log[];
488 extern const char *syscallnames[];
489
490 STATIC int
491 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
492 {
493 __unused int cmd = oidp->oid_arg2; /* subcommand*/
494 __unused int *name = arg1; /* oid element argument vector */
495 __unused int namelen = arg2; /* number of oid element arguments */
496 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
497 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
498 user_addr_t newp = req->newptr; /* user buffer copy in address */
499 size_t newlen = req->newlen; /* user buffer copy in size */
500 int error;
501
502 int tmp;
503
504 /* valid values passed in:
505 * = 0 means don't keep called counts for each bsd syscall
506 * > 0 means keep called counts for each bsd syscall
507 * = 2 means dump current counts to the system log
508 * = 3 means reset all counts
509 * for example, to dump current counts:
510 * sysctl -w kern.count_calls=2
511 */
512 error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
513 if ( error != 0 ) {
514 return (error);
515 }
516
517 if ( tmp == 1 ) {
518 do_count_syscalls = 1;
519 }
520 else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
521 int i;
522 for ( i = 0; i < nsysent; i++ ) {
523 if ( syscalls_log[i] != 0 ) {
524 if ( tmp == 2 ) {
525 printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
526 }
527 else {
528 syscalls_log[i] = 0;
529 }
530 }
531 }
532 if ( tmp != 0 ) {
533 do_count_syscalls = 1;
534 }
535 }
536
537 /* adjust index so we return the right required/consumed amount */
538 if (!error)
539 req->oldidx += req->oldlen;
540
541 return (error);
542 }
543 SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
544 0, /* Pointer argument (arg1) */
545 0, /* Integer argument (arg2) */
546 sysctl_docountsyscalls, /* Handler function */
547 NULL, /* Data pointer */
548 "");
549 #endif /* COUNT_SYSCALLS */
550
551 /*
552 * The following sysctl_* functions should not be used
553 * any more, as they can only cope with callers in
554 * user mode: Use new-style
555 * sysctl_io_number()
556 * sysctl_io_string()
557 * sysctl_io_opaque()
558 * instead.
559 */
560
561 /*
562 * Validate parameters and get old / set new parameters
563 * for an integer-valued sysctl function.
564 */
565 int
566 sysctl_int(user_addr_t oldp, size_t *oldlenp,
567 user_addr_t newp, size_t newlen, int *valp)
568 {
569 int error = 0;
570
571 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
572 return (EFAULT);
573 if (oldp && *oldlenp < sizeof(int))
574 return (ENOMEM);
575 if (newp && newlen != sizeof(int))
576 return (EINVAL);
577 *oldlenp = sizeof(int);
578 if (oldp)
579 error = copyout(valp, oldp, sizeof(int));
580 if (error == 0 && newp) {
581 error = copyin(newp, valp, sizeof(int));
582 AUDIT_ARG(value32, *valp);
583 }
584 return (error);
585 }
586
587 /*
588 * Validate parameters and get old / set new parameters
589 * for an quad(64bit)-valued sysctl function.
590 */
591 int
592 sysctl_quad(user_addr_t oldp, size_t *oldlenp,
593 user_addr_t newp, size_t newlen, quad_t *valp)
594 {
595 int error = 0;
596
597 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
598 return (EFAULT);
599 if (oldp && *oldlenp < sizeof(quad_t))
600 return (ENOMEM);
601 if (newp && newlen != sizeof(quad_t))
602 return (EINVAL);
603 *oldlenp = sizeof(quad_t);
604 if (oldp)
605 error = copyout(valp, oldp, sizeof(quad_t));
606 if (error == 0 && newp)
607 error = copyin(newp, valp, sizeof(quad_t));
608 return (error);
609 }
610
611 STATIC int
612 sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
613 {
614 if (p->p_pid != (pid_t)*(int*)arg)
615 return(0);
616 else
617 return(1);
618 }
619
620 STATIC int
621 sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
622 {
623 if (p->p_pgrpid != (pid_t)*(int*)arg)
624 return(0);
625 else
626 return(1);
627 }
628
/*
 * proc_iterate() filter: match processes whose controlling terminal's
 * device number equals the integer passed in *arg.  Returns 1 on
 * match, 0 otherwise.  The condition chain relies on short-circuit
 * evaluation: 'tp' is only assigned (and then dereferenced) after the
 * pgrp/session pointers have been checked non-NULL.
 */
STATIC int
sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
{
	int retval;
	struct tty *tp;

	/* This is very racy but list lock is held.. Hmmm. */
	if ((p->p_flag & P_CONTROLT) == 0 ||
	    (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
	    (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
	    tp->t_dev != (dev_t)*(int*)arg)
		retval = 0;
	else
		retval = 1;

	return(retval);
}
646
647 STATIC int
648 sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
649 {
650 kauth_cred_t my_cred;
651 uid_t uid;
652
653 if (p->p_ucred == NULL)
654 return(0);
655 my_cred = kauth_cred_proc_ref(p);
656 uid = kauth_cred_getuid(my_cred);
657 kauth_cred_unref(&my_cred);
658
659 if (uid != (uid_t)*(int*)arg)
660 return(0);
661 else
662 return(1);
663 }
664
665
666 STATIC int
667 sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
668 {
669 kauth_cred_t my_cred;
670 uid_t ruid;
671
672 if (p->p_ucred == NULL)
673 return(0);
674 my_cred = kauth_cred_proc_ref(p);
675 ruid = kauth_cred_getruid(my_cred);
676 kauth_cred_unref(&my_cred);
677
678 if (ruid != (uid_t)*(int*)arg)
679 return(0);
680 else
681 return(1);
682 }
683
684 /*
685 * try over estimating by 5 procs
686 */
687 #define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
/* Arguments shared between sysctl_prochandle() and sysdoproc_callback(). */
struct sysdoproc_args {
	int buflen;		/* bytes remaining in the user buffer */
	void *kprocp;		/* scratch kinfo_proc (32- or 64-bit layout) */
	boolean_t is_64_bit;	/* requesting process is 64-bit */
	user_addr_t dp;		/* current copyout position in user buffer */
	size_t needed;		/* running total of bytes required */
	int sizeof_kproc;	/* size of one record in kprocp's layout */
	int *errorp;		/* out: first copyout error encountered */
	int uidcheck;		/* nonzero: filter by effective uid */
	int ruidcheck;		/* nonzero: filter by real uid */
	int ttycheck;		/* nonzero: filter by controlling tty dev */
	int uidval;		/* value compared by the uid/ruid/tty filters */
};
701
/*
 * proc_iterate() callback used by sysctl_prochandle(): apply the
 * optional uid/ruid/tty post-filters, then marshal the process into
 * the caller-sized kinfo_proc image and copy it out to user space.
 * 'needed' is accumulated for every surviving process — even when the
 * user buffer is already full — so the caller can report the total
 * buffer size required.
 */
int
sysdoproc_callback(proc_t p, void *arg)
{
	struct sysdoproc_args *args = arg;

	/* only copy out while there is room for a complete record */
	if (args->buflen >= args->sizeof_kproc) {
		if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0))
			return (PROC_RETURNED);
		if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0))
			return (PROC_RETURNED);
		if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0))
			return (PROC_RETURNED);

		/* zero first so unfilled fields don't leak stale scratch data */
		bzero(args->kprocp, args->sizeof_kproc);
		if (args->is_64_bit)
			fill_user64_proc(p, args->kprocp);
		else
			fill_user32_proc(p, args->kprocp);
		int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
		if (error) {
			/* report the error and stop the iteration */
			*args->errorp = error;
			return (PROC_RETURNED_DONE);
		}
		args->dp += args->sizeof_kproc;
		args->buflen -= args->sizeof_kproc;
	}
	args->needed += args->sizeof_kproc;
	return (PROC_RETURNED);
}
731
732 SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
/*
 * Common handler for the kern.proc.* nodes (all/pid/pgrp/tty/uid/ruid).
 * The subcommand is carried in oidp->oid_arg2 and selects either a
 * proc_iterate() filter function (PID, PGRP) or a post-filter flag
 * applied in sysdoproc_callback() (TTY, UID, RUID).
 *
 * When the caller supplied no output buffer, only the required size
 * (padded by KERN_PROCSLOP) is computed and reported; otherwise the
 * matching kinfo_proc records are copied out and ENOMEM is returned
 * if the buffer turned out to be too small.
 */
STATIC int
sysctl_prochandle SYSCTL_HANDLER_ARGS
{
	int cmd = oidp->oid_arg2;	/* subcommand for multiple nodes */
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t where = req->oldptr;/* user buffer copy out address */

	user_addr_t dp = where;
	size_t needed = 0;
	int buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
	int error = 0;
	boolean_t is_64_bit = proc_is64bit(current_proc());
	struct user32_kinfo_proc  user32_kproc;
	struct user64_kinfo_proc  user_kproc;
	int sizeof_kproc;
	void *kprocp;
	int (*filterfn)(proc_t, void *) = 0;
	struct sysdoproc_args args;
	int uidcheck = 0;
	int ruidcheck = 0;
	int ttycheck = 0;

	/* every subcommand takes exactly one argument, except ALL which takes none */
	if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL))
		return (EINVAL);

	/* pick the record layout matching the caller's address-space size */
	if (is_64_bit) {
		sizeof_kproc = sizeof(user_kproc);
		kprocp = &user_kproc;
	} else {
		sizeof_kproc = sizeof(user32_kproc);
		kprocp = &user32_kproc;
	}

	switch (cmd) {

	case KERN_PROC_PID:
		filterfn = sysdoproc_filt_KERN_PROC_PID;
		break;

	case KERN_PROC_PGRP:
		filterfn = sysdoproc_filt_KERN_PROC_PGRP;
		break;

	case KERN_PROC_TTY:
		ttycheck = 1;
		break;

	case KERN_PROC_UID:
		uidcheck = 1;
		break;

	case KERN_PROC_RUID:
		ruidcheck = 1;
		break;

	case KERN_PROC_ALL:
		break;

	default:
		/* must be kern.proc.<unknown> */
		return (ENOTSUP);
	}

	error = 0;
	args.buflen = buflen;
	args.kprocp = kprocp;
	args.is_64_bit = is_64_bit;
	args.dp = dp;
	args.needed = needed;
	args.errorp = &error;
	args.uidcheck = uidcheck;
	args.ruidcheck = ruidcheck;
	args.ttycheck = ttycheck;
	args.sizeof_kproc = sizeof_kproc;
	if (namelen)
		args.uidval = name[0];

	/* walk both live and zombie processes */
	proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
	    sysdoproc_callback, &args, filterfn, name);

	if (error)
		return (error);

	dp = args.dp;
	needed = args.needed;

	if (where != USER_ADDR_NULL) {
		/* report how much was actually written; fail if more was needed */
		req->oldlen = dp - where;
		if (needed > req->oldlen)
			return (ENOMEM);
	} else {
		/* size-probe only: over-estimate to absorb process churn */
		needed += KERN_PROCSLOP;
		req->oldlen = needed;
	}
	/* adjust index so we return the right required/consumed amount */
	req->oldidx += req->oldlen;
	return (0);
}
832
833 /*
834 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
835 * in the sysctl declaration itself, which comes into the handler function
836 * as 'oidp->oid_arg2'.
837 *
838 * For these particular sysctls, since they have well known OIDs, we could
839 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
840 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
841 * of a well known value with a common handler function. This is desirable,
842 * because we want well known values to "go away" at some future date.
843 *
844 * It should be noted that the value of '((int *)arg1)[1]' is used for many
845 * an integer parameter to the subcommand for many of these sysctls; we'd
846 * rather have used '((int *)arg1)[0]' for that, or even better, an element
847 * in a structure passed in as the the 'newp' argument to sysctlbyname(3),
848 * and then use leaf-node permissions enforcement, but that would have
849 * necessitated modifying user space code to correspond to the interface
850 * change, and we are striving for binary backward compatibility here; even
851 * though these are SPI, and not intended for use by user space applications
852 * which are not themselves system tools or libraries, some applications
853 * have erroneously used them.
854 */
855 SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
856 0, /* Pointer argument (arg1) */
857 KERN_PROC_ALL, /* Integer argument (arg2) */
858 sysctl_prochandle, /* Handler function */
859 NULL, /* Data is size variant on ILP32/LP64 */
860 "");
861 SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
862 0, /* Pointer argument (arg1) */
863 KERN_PROC_PID, /* Integer argument (arg2) */
864 sysctl_prochandle, /* Handler function */
865 NULL, /* Data is size variant on ILP32/LP64 */
866 "");
867 SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
868 0, /* Pointer argument (arg1) */
869 KERN_PROC_TTY, /* Integer argument (arg2) */
870 sysctl_prochandle, /* Handler function */
871 NULL, /* Data is size variant on ILP32/LP64 */
872 "");
873 SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
874 0, /* Pointer argument (arg1) */
875 KERN_PROC_PGRP, /* Integer argument (arg2) */
876 sysctl_prochandle, /* Handler function */
877 NULL, /* Data is size variant on ILP32/LP64 */
878 "");
879 SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
880 0, /* Pointer argument (arg1) */
881 KERN_PROC_UID, /* Integer argument (arg2) */
882 sysctl_prochandle, /* Handler function */
883 NULL, /* Data is size variant on ILP32/LP64 */
884 "");
885 SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
886 0, /* Pointer argument (arg1) */
887 KERN_PROC_RUID, /* Integer argument (arg2) */
888 sysctl_prochandle, /* Handler function */
889 NULL, /* Data is size variant on ILP32/LP64 */
890 "");
891 SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
892 0, /* Pointer argument (arg1) */
893 KERN_PROC_LCID, /* Integer argument (arg2) */
894 sysctl_prochandle, /* Handler function */
895 NULL, /* Data is size variant on ILP32/LP64 */
896 "");
897
898
899 /*
900 * Fill in non-zero fields of an eproc structure for the specified process.
901 */
/*
 * Fill in non-zero fields of an ILP32 eproc structure for the
 * specified process: process group, parent pid, a fake historical
 * pcred/ucred image of the credentials, and controlling-terminal
 * information.  The pgrp and session references taken up front are
 * released at the end.
 */
STATIC void
fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
{
	struct tty *tp;
	struct pgrp *pg;
	struct session *sessp;
	kauth_cred_t my_cred;

	/* take references so the pgrp/session can't go away under us */
	pg = proc_pgrp(p);
	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	}
	ep->e_ppid = p->p_ppid;
	if (p->p_ucred) {
		/* hold a credential reference while reading its fields */
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
		ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
		ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
		ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
		bcopy(posix_cred_get(my_cred)->cr_groups,
		    ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));

		kauth_cred_unref(&my_cred);
	}

	/* controlling terminal: device and foreground pgrp, else NODEV */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	    (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
	} else
		ep->e_tdev = NODEV;

	if (sessp != SESSION_NULL) {
		if (SESS_LEADER(p, sessp))
			ep->e_flag |= EPROC_SLEADER;
		session_rele(sessp);
	}
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
954
955 /*
956 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
957 */
STATIC void
fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
{
	struct tty *tp;
	struct pgrp *pg;
	struct session *sessp;
	kauth_cred_t my_cred;

	/* Take references on the pgrp and session; both are dropped below. */
	pg = proc_pgrp(p);
	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		/* a session with a tty vnode means a controlling terminal */
		if (sessp != SESSION_NULL && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	}
	ep->e_ppid = p->p_ppid;
	if (p->p_ucred) {
		/* hold a cred ref so the identity fields are stable while copied */
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
		ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
		ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
		ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
		bcopy(posix_cred_get(my_cred)->cr_groups,
			ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));

		kauth_cred_unref(&my_cred);
	}

	/* controlling-terminal device/pgrp, if the session still has a tty */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	     (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
	} else
		ep->e_tdev = NODEV;

	if (sessp != SESSION_NULL) {
		if (SESS_LEADER(p, sessp))
			ep->e_flag |= EPROC_SLEADER;
		session_rele(sessp);
	}
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
1010
1011 /*
1012 * Fill in an eproc structure for the specified process.
1013 * bzeroed by our caller, so only set non-zero fields.
1014 */
STATIC void
fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
{
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_flag = p->p_flag;
	/* fold selected p_lflag bits into the historical p_flag word */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	/* Mach related  */
	exp->user_stack = p->user_stack;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
#endif
	/* timevals are narrowed to the ILP32 layout; values may truncate */
	exp->p_realtimer.it_interval.tv_sec =
	    (user32_time_t)p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec =
	    (__int32_t)p->p_realtimer.it_interval.tv_usec;

	exp->p_realtimer.it_value.tv_sec =
	    (user32_time_t)p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec =
	    (__int32_t)p->p_realtimer.it_value.tv_usec;

	exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;

	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
}
1061
1062 /*
1063 * Fill in an LP64 version of extern_proc structure for the specified process.
1064 */
STATIC void
fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
{
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_flag = p->p_flag;
	/* fold selected p_lflag bits into the historical p_flag word */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	/* Mach related  */
	exp->user_stack = p->user_stack;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
#endif
	/* LP64 layout: timevals copy through without narrowing */
	exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;

	exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;

	exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = p->p_rtime.tv_usec;

	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
}
1107
1108 STATIC void
1109 fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
1110 {
1111 /* on a 64 bit kernel, 32 bit users get some truncated information */
1112 fill_user32_externproc(p, &kp->kp_proc);
1113 fill_user32_eproc(p, &kp->kp_eproc);
1114 }
1115
1116 STATIC void
1117 fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
1118 {
1119 fill_user64_externproc(p, &kp->kp_proc);
1120 fill_user64_eproc(p, &kp->kp_eproc);
1121 }
1122
1123 STATIC int
1124 sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
1125 {
1126 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1127 int *name = arg1; /* oid element argument vector */
1128 int namelen = arg2; /* number of oid element arguments */
1129 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1130 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1131 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1132 // size_t newlen = req->newlen; /* user buffer copy in size */
1133
1134 int ret=0;
1135
1136 if (namelen == 0)
1137 return(ENOTSUP);
1138
1139 switch(name[0]) {
1140 case KERN_KDEFLAGS:
1141 case KERN_KDDFLAGS:
1142 case KERN_KDENABLE:
1143 case KERN_KDGETBUF:
1144 case KERN_KDSETUP:
1145 case KERN_KDREMOVE:
1146 case KERN_KDSETREG:
1147 case KERN_KDGETREG:
1148 case KERN_KDREADTR:
1149 case KERN_KDWRITETR:
1150 case KERN_KDWRITEMAP:
1151 case KERN_KDTEST:
1152 case KERN_KDPIDTR:
1153 case KERN_KDTHRMAP:
1154 case KERN_KDPIDEX:
1155 case KERN_KDSETBUF:
1156 case KERN_KDREADCURTHRMAP:
1157 case KERN_KDSET_TYPEFILTER:
1158 case KERN_KDBUFWAIT:
1159 case KERN_KDCPUMAP:
1160 case KERN_KDWRITEMAP_V3:
1161 case KERN_KDWRITETR_V3:
1162 ret = kdbg_control(name, namelen, oldp, oldlenp);
1163 break;
1164 default:
1165 ret= ENOTSUP;
1166 break;
1167 }
1168
1169 /* adjust index so we return the right required/consumed amount */
1170 if (!ret)
1171 req->oldidx += req->oldlen;
1172
1173 return (ret);
1174 }
1175 SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
1176 0, /* Pointer argument (arg1) */
1177 0, /* Integer argument (arg2) */
1178 sysctl_kdebug_ops, /* Handler function */
1179 NULL, /* Data pointer */
1180 "");
1181
1182
1183 /*
1184 * Return the top *sizep bytes of the user stack, or the entire area of the
1185 * user stack down through the saved exec_path, whichever is smaller.
1186 */
1187 STATIC int
1188 sysctl_doprocargs SYSCTL_HANDLER_ARGS
1189 {
1190 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1191 int *name = arg1; /* oid element argument vector */
1192 int namelen = arg2; /* number of oid element arguments */
1193 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1194 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1195 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1196 // size_t newlen = req->newlen; /* user buffer copy in size */
1197 int error;
1198
1199 error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 0);
1200
1201 /* adjust index so we return the right required/consumed amount */
1202 if (!error)
1203 req->oldidx += req->oldlen;
1204
1205 return (error);
1206 }
1207 SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
1208 0, /* Pointer argument (arg1) */
1209 0, /* Integer argument (arg2) */
1210 sysctl_doprocargs, /* Handler function */
1211 NULL, /* Data pointer */
1212 "");
1213
1214 STATIC int
1215 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1216 {
1217 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1218 int *name = arg1; /* oid element argument vector */
1219 int namelen = arg2; /* number of oid element arguments */
1220 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1221 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1222 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1223 // size_t newlen = req->newlen; /* user buffer copy in size */
1224 int error;
1225
1226 error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 1);
1227
1228 /* adjust index so we return the right required/consumed amount */
1229 if (!error)
1230 req->oldidx += req->oldlen;
1231
1232 return (error);
1233 }
1234 SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
1235 0, /* Pointer argument (arg1) */
1236 0, /* Integer argument (arg2) */
1237 sysctl_doprocargs2, /* Handler function */
1238 NULL, /* Data pointer */
1239 "");
1240
1241 STATIC int
1242 sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
1243 size_t *sizep, proc_t cur_proc, int argc_yes)
1244 {
1245 proc_t p;
1246 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1247 int error = 0;
1248 struct _vm_map *proc_map;
1249 struct task * task;
1250 vm_map_copy_t tmp;
1251 user_addr_t arg_addr;
1252 size_t arg_size;
1253 caddr_t data;
1254 size_t argslen=0;
1255 int size;
1256 vm_offset_t copy_start, copy_end;
1257 kern_return_t ret;
1258 int pid;
1259 kauth_cred_t my_cred;
1260 uid_t uid;
1261
1262 if ( namelen < 1 )
1263 return(EINVAL);
1264
1265 if (argc_yes)
1266 buflen -= sizeof(int); /* reserve first word to return argc */
1267
1268 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1269 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1270 /* is not NULL then the caller wants us to return the length needed to */
1271 /* hold the data we would return */
1272 if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1273 return(EINVAL);
1274 }
1275 arg_size = buflen;
1276
1277 /*
1278 * Lookup process by pid
1279 */
1280 pid = name[0];
1281 p = proc_find(pid);
1282 if (p == NULL) {
1283 return(EINVAL);
1284 }
1285
1286 /*
1287 * Copy the top N bytes of the stack.
1288 * On all machines we have so far, the stack grows
1289 * downwards.
1290 *
1291 * If the user expects no more than N bytes of
1292 * argument list, use that as a guess for the
1293 * size.
1294 */
1295
1296 if (!p->user_stack) {
1297 proc_rele(p);
1298 return(EINVAL);
1299 }
1300
1301 if (where == USER_ADDR_NULL) {
1302 /* caller only wants to know length of proc args data */
1303 if (sizep == NULL) {
1304 proc_rele(p);
1305 return(EFAULT);
1306 }
1307
1308 size = p->p_argslen;
1309 proc_rele(p);
1310 if (argc_yes) {
1311 size += sizeof(int);
1312 }
1313 else {
1314 /*
1315 * old PROCARGS will return the executable's path and plus some
1316 * extra space for work alignment and data tags
1317 */
1318 size += PATH_MAX + (6 * sizeof(int));
1319 }
1320 size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1321 *sizep = size;
1322 return (0);
1323 }
1324
1325 my_cred = kauth_cred_proc_ref(p);
1326 uid = kauth_cred_getuid(my_cred);
1327 kauth_cred_unref(&my_cred);
1328
1329 if ((uid != kauth_cred_getuid(kauth_cred_get()))
1330 && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
1331 proc_rele(p);
1332 return (EINVAL);
1333 }
1334
1335 if ((u_int)arg_size > p->p_argslen)
1336 arg_size = round_page(p->p_argslen);
1337
1338 arg_addr = p->user_stack - arg_size;
1339
1340
1341 /*
1342 * Before we can block (any VM code), make another
1343 * reference to the map to keep it alive. We do
1344 * that by getting a reference on the task itself.
1345 */
1346 task = p->task;
1347 if (task == NULL) {
1348 proc_rele(p);
1349 return(EINVAL);
1350 }
1351
1352 argslen = p->p_argslen;
1353 /*
1354 * Once we have a task reference we can convert that into a
1355 * map reference, which we will use in the calls below. The
1356 * task/process may change its map after we take this reference
1357 * (see execve), but the worst that will happen then is a return
1358 * of stale info (which is always a possibility).
1359 */
1360 task_reference(task);
1361 proc_rele(p);
1362 proc_map = get_task_map_reference(task);
1363 task_deallocate(task);
1364
1365 if (proc_map == NULL)
1366 return(EINVAL);
1367
1368
1369 ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size), VM_KERN_MEMORY_BSD);
1370 if (ret != KERN_SUCCESS) {
1371 vm_map_deallocate(proc_map);
1372 return(ENOMEM);
1373 }
1374
1375 copy_end = round_page(copy_start + arg_size);
1376
1377 if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
1378 (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
1379 vm_map_deallocate(proc_map);
1380 kmem_free(kernel_map, copy_start,
1381 round_page(arg_size));
1382 return (EIO);
1383 }
1384
1385 /*
1386 * Now that we've done the copyin from the process'
1387 * map, we can release the reference to it.
1388 */
1389 vm_map_deallocate(proc_map);
1390
1391 if( vm_map_copy_overwrite(kernel_map,
1392 (vm_map_address_t)copy_start,
1393 tmp, FALSE) != KERN_SUCCESS) {
1394 kmem_free(kernel_map, copy_start,
1395 round_page(arg_size));
1396 vm_map_copy_discard(tmp);
1397 return (EIO);
1398 }
1399
1400 if (arg_size > argslen) {
1401 data = (caddr_t) (copy_end - argslen);
1402 size = argslen;
1403 } else {
1404 data = (caddr_t) (copy_end - arg_size);
1405 size = arg_size;
1406 }
1407
1408 /*
1409 * When these sysctls were introduced, the first string in the strings
1410 * section was just the bare path of the executable. However, for security
1411 * reasons we now prefix this string with executable_path= so it can be
1412 * parsed getenv style. To avoid binary compatability issues with exising
1413 * callers of this sysctl, we strip it off here if present.
1414 * (rdar://problem/13746466)
1415 */
1416 #define EXECUTABLE_KEY "executable_path="
1417 if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0){
1418 data += strlen(EXECUTABLE_KEY);
1419 size -= strlen(EXECUTABLE_KEY);
1420 }
1421
1422 if (argc_yes) {
1423 /* Put processes argc as the first word in the copyout buffer */
1424 suword(where, p->p_argc);
1425 error = copyout(data, (where + sizeof(int)), size);
1426 size += sizeof(int);
1427 } else {
1428 error = copyout(data, where, size);
1429
1430 /*
1431 * Make the old PROCARGS work to return the executable's path
1432 * But, only if there is enough space in the provided buffer
1433 *
1434 * on entry: data [possibily] points to the beginning of the path
1435 *
1436 * Note: we keep all pointers&sizes aligned to word boundries
1437 */
1438 if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
1439 {
1440 int binPath_sz, alignedBinPath_sz = 0;
1441 int extraSpaceNeeded, addThis;
1442 user_addr_t placeHere;
1443 char * str = (char *) data;
1444 int max_len = size;
1445
1446 /* Some apps are really bad about messing up their stacks
1447 So, we have to be extra careful about getting the length
1448 of the executing binary. If we encounter an error, we bail.
1449 */
1450
1451 /* Limit ourselves to PATH_MAX paths */
1452 if ( max_len > PATH_MAX ) max_len = PATH_MAX;
1453
1454 binPath_sz = 0;
1455
1456 while ( (binPath_sz < max_len-1) && (*str++ != 0) )
1457 binPath_sz++;
1458
1459 /* If we have a NUL terminator, copy it, too */
1460 if (binPath_sz < max_len-1) binPath_sz += 1;
1461
1462 /* Pre-Flight the space requiremnts */
1463
1464 /* Account for the padding that fills out binPath to the next word */
1465 alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;
1466
1467 placeHere = where + size;
1468
1469 /* Account for the bytes needed to keep placeHere word aligned */
1470 addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;
1471
1472 /* Add up all the space that is needed */
1473 extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
1474
1475 /* is there is room to tack on argv[0]? */
1476 if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
1477 {
1478 placeHere += addThis;
1479 suword(placeHere, 0);
1480 placeHere += sizeof(int);
1481 suword(placeHere, 0xBFFF0000);
1482 placeHere += sizeof(int);
1483 suword(placeHere, 0);
1484 placeHere += sizeof(int);
1485 error = copyout(data, placeHere, binPath_sz);
1486 if ( ! error )
1487 {
1488 placeHere += binPath_sz;
1489 suword(placeHere, 0);
1490 size += extraSpaceNeeded;
1491 }
1492 }
1493 }
1494 }
1495
1496 if (copy_start != (vm_offset_t) 0) {
1497 kmem_free(kernel_map, copy_start, copy_end - copy_start);
1498 }
1499 if (error) {
1500 return(error);
1501 }
1502
1503 if (where != USER_ADDR_NULL)
1504 *sizep = size;
1505 return (0);
1506 }
1507
1508
1509 /*
1510 * Max number of concurrent aio requests
1511 */
1512 STATIC int
1513 sysctl_aiomax
1514 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1515 {
1516 int new_value, changed;
1517 int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
1518 if (changed) {
1519 /* make sure the system-wide limit is greater than the per process limit */
1520 if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS)
1521 aio_max_requests = new_value;
1522 else
1523 error = EINVAL;
1524 }
1525 return(error);
1526 }
1527
1528
1529 /*
1530 * Max number of concurrent aio requests per process
1531 */
1532 STATIC int
1533 sysctl_aioprocmax
1534 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1535 {
1536 int new_value, changed;
1537 int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
1538 if (changed) {
1539 /* make sure per process limit is less than the system-wide limit */
1540 if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
1541 aio_max_requests_per_process = new_value;
1542 else
1543 error = EINVAL;
1544 }
1545 return(error);
1546 }
1547
1548
1549 /*
1550 * Max number of async IO worker threads
1551 */
1552 STATIC int
1553 sysctl_aiothreads
1554 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1555 {
1556 int new_value, changed;
1557 int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
1558 if (changed) {
1559 /* we only allow an increase in the number of worker threads */
1560 if (new_value > aio_worker_threads ) {
1561 _aio_create_worker_threads((new_value - aio_worker_threads));
1562 aio_worker_threads = new_value;
1563 }
1564 else
1565 error = EINVAL;
1566 }
1567 return(error);
1568 }
1569
1570
1571 /*
1572 * System-wide limit on the max number of processes
1573 */
1574 STATIC int
1575 sysctl_maxproc
1576 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1577 {
1578 int new_value, changed;
1579 int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
1580 if (changed) {
1581 AUDIT_ARG(value32, new_value);
1582 /* make sure the system-wide limit is less than the configured hard
1583 limit set at kernel compilation */
1584 if (new_value <= hard_maxproc && new_value > 0)
1585 maxproc = new_value;
1586 else
1587 error = EINVAL;
1588 }
1589 return(error);
1590 }
1591
/* Static kernel identification values exported read-only via sysctl. */
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		version, 0, "");
SYSCTL_STRING(_kern, OID_AUTO, uuid,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&kernel_uuid_string[0], 0, "");
1607
1608 #if DEBUG
/* nonzero enables kprintf syscall tracing (DEBUG kernels only) */
int debug_kprint_syscall = 0;
/* optional process-name filter for the tracing above; empty = all */
char debug_kprint_syscall_process[MAXCOMLEN+1];

/* Thread safe: bits and string value are not used to reclaim state */
SYSCTL_INT (_debug, OID_AUTO, kprint_syscall,
	   CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process, 
			  CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
			  "name of process for kprintf syscall tracing");
1618
1619 int debug_kprint_current_process(const char **namep)
1620 {
1621 struct proc *p = current_proc();
1622
1623 if (p == NULL) {
1624 return 0;
1625 }
1626
1627 if (debug_kprint_syscall_process[0]) {
1628 /* user asked to scope tracing to a particular process name */
1629 if(0 == strncmp(debug_kprint_syscall_process,
1630 p->p_comm, sizeof(debug_kprint_syscall_process))) {
1631 /* no value in telling the user that we traced what they asked */
1632 if(namep) *namep = NULL;
1633
1634 return 1;
1635 } else {
1636 return 0;
1637 }
1638 }
1639
1640 /* trace all processes. Tell user what we traced */
1641 if (namep) {
1642 *namep = p->p_comm;
1643 }
1644
1645 return 1;
1646 }
1647 #endif
1648
1649 /* PR-5293665: need to use a callback function for kern.osversion to set
1650 * osversion in IORegistry */
1651
1652 STATIC int
1653 sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1654 {
1655 int rval = 0;
1656
1657 rval = sysctl_handle_string(oidp, arg1, arg2, req);
1658
1659 if (req->newptr) {
1660 IORegistrySetOSBuildVersion((char *)arg1);
1661 }
1662
1663 return rval;
1664 }
1665
1666 SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
1667 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
1668 osversion, 256 /* OSVERSIZE*/,
1669 sysctl_osversion, "A", "");
1670
1671 STATIC int
1672 sysctl_sysctl_bootargs
1673 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1674 {
1675 int error;
1676 char buf[256];
1677
1678 strlcpy(buf, PE_boot_args(), 256);
1679 error = sysctl_io_string(req, buf, 256, 0, NULL);
1680 return(error);
1681 }
1682
1683 SYSCTL_PROC(_kern, OID_AUTO, bootargs,
1684 CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
1685 NULL, 0,
1686 sysctl_sysctl_bootargs, "A", "bootargs");
1687
/* POSIX/compile-time limits and live kernel object counts. */
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, 1, "");
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		NULL, 0, "");
#endif
/* live counts of open files, vnodes, tasks and threads */
SYSCTL_INT(_kern, OID_AUTO, num_files,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&nfiles, 0, "");
SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&numvnodes, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_tasks,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&task_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_threads,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&thread_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&task_threadmax, 0, "");
1727
1728 STATIC int
1729 sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1730 {
1731 int oldval = desiredvnodes;
1732 int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
1733
1734 if (oldval != desiredvnodes) {
1735 reset_vmobjectcache(oldval, desiredvnodes);
1736 resize_namecache(desiredvnodes);
1737 }
1738
1739 return(error);
1740 }
1741
/* debug switch: nonzero disables the name cache entirely */
SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
		CTLFLAG_RW | CTLFLAG_LOCKED,
		&nc_disabled, 0, "");

/* writable limits served by the handler functions defined above */
SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_maxvnodes, "I", "");

SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aiothreads, "I", "");

#if (DEVELOPMENT || DEBUG)
/* scheduler SMT load-balancing knob; development/debug kernels only */
extern int sched_smt_balance;
SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance, 
	       CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED, 
	       &sched_smt_balance, 0, "");
#endif
1772
1773 STATIC int
1774 sysctl_securelvl
1775 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1776 {
1777 int new_value, changed;
1778 int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
1779 if (changed) {
1780 if (!(new_value < securelevel && req->p->p_pid != 1)) {
1781 proc_list_lock();
1782 securelevel = new_value;
1783 proc_list_unlock();
1784 } else {
1785 error = EPERM;
1786 }
1787 }
1788 return(error);
1789 }
1790
1791 SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
1792 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1793 0, 0, sysctl_securelvl, "I", "");
1794
1795
1796 STATIC int
1797 sysctl_domainname
1798 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1799 {
1800 int error, changed;
1801 error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
1802 if (changed) {
1803 domainnamelen = strlen(domainname);
1804 }
1805 return(error);
1806 }
1807
1808 SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
1809 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
1810 0, 0, sysctl_domainname, "A", "");
1811
1812 SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
1813 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1814 &hostid, 0, "");
1815
1816 STATIC int
1817 sysctl_hostname
1818 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1819 {
1820 int error, changed;
1821 error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
1822 if (changed) {
1823 hostnamelen = req->newlen;
1824 }
1825 return(error);
1826 }
1827
1828
1829 SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
1830 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
1831 0, 0, sysctl_hostname, "A", "");
1832
1833 STATIC int
1834 sysctl_procname
1835 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1836 {
1837 /* Original code allowed writing, I'm copying this, although this all makes
1838 no sense to me. Besides, this sysctl is never used. */
1839 return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
1840 }
1841
1842 SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
1843 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
1844 0, 0, sysctl_procname, "A", "");
1845
/* speculative-read and cluster-I/O prefetch tunables */
SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&speculative_reads_disabled, 0, "");

SYSCTL_INT(_kern, OID_AUTO, ignore_is_ssd,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&ignore_is_ssd, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&preheat_max_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&preheat_min_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&speculative_prefetch_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&speculative_prefetch_max_iosize, 0, "");

/* VM pageout/free-list thresholds */
SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_free_target, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_free_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_free_reserved, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_speculative_percentage, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_speculative_q_age_ms, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_max_delayed_work_limit, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_max_batch, 0, "");

/* UUID regenerated on every boot; identifies this boot session */
SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&bootsessionuuid_string, sizeof(bootsessionuuid_string) , ""); 
1901
1902 STATIC int
1903 sysctl_boottime
1904 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1905 {
1906 struct timeval tv;
1907 boottime_timeval(&tv);
1908 struct proc *p = req->p;
1909
1910 if (proc_is64bit(p)) {
1911 struct user64_timeval t;
1912 t.tv_sec = tv.tv_sec;
1913 t.tv_usec = tv.tv_usec;
1914 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
1915 } else {
1916 struct user32_timeval t;
1917 t.tv_sec = tv.tv_sec;
1918 t.tv_usec = tv.tv_usec;
1919 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
1920 }
1921 }
1922
1923 SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
1924 CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
1925 0, 0, sysctl_boottime, "S,timeval", "");
1926
1927 STATIC int
1928 sysctl_symfile
1929 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1930 {
1931 char *str;
1932 int error = get_kernel_symfile(req->p, &str);
1933 if (error)
1934 return (error);
1935 return sysctl_io_string(req, str, 0, 0, NULL);
1936 }
1937
1938
1939 SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
1940 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
1941 0, 0, sysctl_symfile, "A", "");
1942
1943 #if NFSCLIENT
1944 STATIC int
1945 sysctl_netboot
1946 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1947 {
1948 return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
1949 }
1950
1951 SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
1952 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
1953 0, 0, sysctl_netboot, "I", "");
1954 #endif
1955
1956 #ifdef CONFIG_IMGSRC_ACCESS
1957 /*
1958 * Legacy--act as if only one layer of nesting is possible.
1959 */
STATIC int
sysctl_imgsrcdev
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	vfs_context_t ctx = vfs_context_current();
	vnode_t devvp;
	int result;

	/* Superuser only: exposes the backing device of the image-boot source. */
	if (!vfs_context_issuser(ctx)) {
		return EPERM;
	}

	/* No image source mounted. */
	if (imgsrc_rootvnodes[0] == NULL) {
		return ENOENT;
	}

	/* Hold the root vnode so its mount cannot disappear underneath us. */
	result = vnode_getwithref(imgsrc_rootvnodes[0]);
	if (result != 0) {
		return result;
	}

	/* Also hold the device vnode while reading its dev_t. */
	devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
	result = vnode_getwithref(devvp);
	if (result != 0) {
		goto out;
	}

	result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);

	vnode_put(devvp);
out:
	/* Drop the root-vnode hold on every path after the first get succeeds. */
	vnode_put(imgsrc_rootvnodes[0]);
	return result;
}
1994
1995 SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
1996 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
1997 0, 0, sysctl_imgsrcdev, "I", "");
1998
STATIC int
sysctl_imgsrcinfo
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct imgsrc_info info[MAX_IMAGEBOOT_NESTING]; /* 2 for now, no problem */
	uint32_t i;
	vnode_t rvp, devvp;

	/* No image-boot source at all: report ENXIO rather than an empty list. */
	if (imgsrc_rootvnodes[0] == NULLVP) {
		return ENXIO;
	}

	for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
		/*
		 * Go get the root vnode.
		 */
		rvp = imgsrc_rootvnodes[i];
		if (rvp == NULLVP) {
			break;
		}

		/* Hold the root vnode while inspecting its mount. */
		error = vnode_get(rvp);
		if (error != 0) {
			return error;
		}

		/*
		 * For now, no getting at a non-local volume.
		 */
		devvp = vnode_mount(rvp)->mnt_devvp;
		if (devvp == NULL) {
			vnode_put(rvp);
			return EINVAL;
		}

		error = vnode_getwithref(devvp);
		if (error != 0) {
			vnode_put(rvp);
			return error;
		}

		/*
		 * Fill in info.
		 */
		info[i].ii_dev = vnode_specrdev(devvp);
		info[i].ii_flags = 0;
		info[i].ii_height = i;
		bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));

		/* Release both holds before moving to the next nesting level. */
		vnode_put(devvp);
		vnode_put(rvp);
	}

	/* Copy out only the i entries actually populated above. */
	return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
}
2055
2056 SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
2057 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
2058 0, 0, sysctl_imgsrcinfo, "I", "");
2059
2060 #endif /* CONFIG_IMGSRC_ACCESS */
2061
2062
2063 SYSCTL_DECL(_kern_timer);
2064 SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");
2065
2066
2067 SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
2068 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2069 &mach_timer_coalescing_enabled, 0, "");
2070
2071 SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
2072 CTLFLAG_RW | CTLFLAG_LOCKED,
2073 &timer_deadline_tracking_bin_1, "");
2074 SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
2075 CTLFLAG_RW | CTLFLAG_LOCKED,
2076 &timer_deadline_tracking_bin_2, "");
2077
2078 SYSCTL_DECL(_kern_timer_longterm);
2079 SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");
2080
2081
2082 /* Must match definition in osfmk/kern/timer_call.c */
2083 enum {
2084 THRESHOLD, QCOUNT,
2085 ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
2086 LATENCY, LATENCY_MIN, LATENCY_MAX
2087 };
2088 extern uint64_t timer_sysctl_get(int);
2089 extern int timer_sysctl_set(int, uint64_t);
2090
2091 STATIC int
2092 sysctl_timer
2093 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2094 {
2095 int oid = (int)arg1;
2096 uint64_t value = timer_sysctl_get(oid);
2097 uint64_t new_value;
2098 int error;
2099 int changed;
2100
2101 error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
2102 if (changed)
2103 error = timer_sysctl_set(oid, new_value);
2104
2105 return error;
2106 }
2107
2108 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
2109 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2110 (void *) THRESHOLD, 0, sysctl_timer, "Q", "");
2111 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
2112 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2113 (void *) QCOUNT, 0, sysctl_timer, "Q", "");
2114 #if DEBUG
2115 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
2116 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2117 (void *) ENQUEUES, 0, sysctl_timer, "Q", "");
2118 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
2119 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2120 (void *) DEQUEUES, 0, sysctl_timer, "Q", "");
2121 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
2122 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2123 (void *) ESCALATES, 0, sysctl_timer, "Q", "");
2124 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
2125 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2126 (void *) SCANS, 0, sysctl_timer, "Q", "");
2127 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
2128 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2129 (void *) PREEMPTS, 0, sysctl_timer, "Q", "");
2130 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
2131 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2132 (void *) LATENCY, 0, sysctl_timer, "Q", "");
2133 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
2134 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2135 (void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
2136 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
2137 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2138 (void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
2139 #endif /* DEBUG */
2140
2141 STATIC int
2142 sysctl_usrstack
2143 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2144 {
2145 return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
2146 }
2147
2148 SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
2149 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2150 0, 0, sysctl_usrstack, "I", "");
2151
2152 STATIC int
2153 sysctl_usrstack64
2154 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2155 {
2156 return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
2157 }
2158
2159 SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
2160 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2161 0, 0, sysctl_usrstack64, "Q", "");
2162
2163 #if CONFIG_COREDUMP
2164
2165 SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
2166 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2167 corefilename, sizeof(corefilename), "");
2168
2169 STATIC int
2170 sysctl_coredump
2171 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2172 {
2173 #ifdef SECURE_KERNEL
2174 (void)req;
2175 return (ENOTSUP);
2176 #else
2177 int new_value, changed;
2178 int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
2179 if (changed) {
2180 if ((new_value == 0) || (new_value == 1))
2181 do_coredump = new_value;
2182 else
2183 error = EINVAL;
2184 }
2185 return(error);
2186 #endif
2187 }
2188
2189 SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
2190 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2191 0, 0, sysctl_coredump, "I", "");
2192
2193 STATIC int
2194 sysctl_suid_coredump
2195 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2196 {
2197 #ifdef SECURE_KERNEL
2198 (void)req;
2199 return (ENOTSUP);
2200 #else
2201 int new_value, changed;
2202 int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
2203 if (changed) {
2204 if ((new_value == 0) || (new_value == 1))
2205 sugid_coredump = new_value;
2206 else
2207 error = EINVAL;
2208 }
2209 return(error);
2210 #endif
2211 }
2212
2213 SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
2214 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2215 0, 0, sysctl_suid_coredump, "I", "");
2216
2217 #endif /* CONFIG_COREDUMP */
2218
2219 STATIC int
2220 sysctl_delayterm
2221 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2222 {
2223 struct proc *p = req->p;
2224 int new_value, changed;
2225 int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
2226 if (changed) {
2227 proc_lock(p);
2228 if (new_value)
2229 req->p->p_lflag |= P_LDELAYTERM;
2230 else
2231 req->p->p_lflag &= ~P_LDELAYTERM;
2232 proc_unlock(p);
2233 }
2234 return(error);
2235 }
2236
2237 SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
2238 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2239 0, 0, sysctl_delayterm, "I", "");
2240
2241
2242 STATIC int
2243 sysctl_rage_vnode
2244 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2245 {
2246 struct proc *p = req->p;
2247 struct uthread *ut;
2248 int new_value, old_value, changed;
2249 int error;
2250
2251 ut = get_bsdthread_info(current_thread());
2252
2253 if (ut->uu_flag & UT_RAGE_VNODES)
2254 old_value = KERN_RAGE_THREAD;
2255 else if (p->p_lflag & P_LRAGE_VNODES)
2256 old_value = KERN_RAGE_PROC;
2257 else
2258 old_value = 0;
2259
2260 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2261
2262 if (error == 0) {
2263 switch (new_value) {
2264 case KERN_RAGE_PROC:
2265 proc_lock(p);
2266 p->p_lflag |= P_LRAGE_VNODES;
2267 proc_unlock(p);
2268 break;
2269 case KERN_UNRAGE_PROC:
2270 proc_lock(p);
2271 p->p_lflag &= ~P_LRAGE_VNODES;
2272 proc_unlock(p);
2273 break;
2274
2275 case KERN_RAGE_THREAD:
2276 ut->uu_flag |= UT_RAGE_VNODES;
2277 break;
2278 case KERN_UNRAGE_THREAD:
2279 ut = get_bsdthread_info(current_thread());
2280 ut->uu_flag &= ~UT_RAGE_VNODES;
2281 break;
2282 }
2283 }
2284 return(error);
2285 }
2286
2287 SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
2288 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2289 0, 0, sysctl_rage_vnode, "I", "");
2290
2291 /* XXX move this interface into libproc and remove this sysctl */
STATIC int
sysctl_setthread_cpupercent
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, old_value;
	int error = 0;
	kern_return_t kret = KERN_SUCCESS;
	uint8_t percent = 0;
	int ms_refill = 0;

	/* Write-only control: a read request has nothing to report. */
	if (!req->newptr)
		return (0);

	old_value = 0;

	if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0)
		return (error);

	percent = new_value & 0xff;			/* low 8 bits encode the CPU percentage */
	ms_refill = (new_value >> 8) & 0xffffff;	/* upper 24 bits encode the refill period in ms */
	if (percent > 100)
		return (EINVAL);

	/*
	 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
	 */
	if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0)
		return (EIO);

	return (0);
}
2323
2324 SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
2325 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
2326 0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
2327
2328
2329 STATIC int
2330 sysctl_kern_check_openevt
2331 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2332 {
2333 struct proc *p = req->p;
2334 int new_value, old_value, changed;
2335 int error;
2336
2337 if (p->p_flag & P_CHECKOPENEVT) {
2338 old_value = KERN_OPENEVT_PROC;
2339 } else {
2340 old_value = 0;
2341 }
2342
2343 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2344
2345 if (error == 0) {
2346 switch (new_value) {
2347 case KERN_OPENEVT_PROC:
2348 OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
2349 break;
2350
2351 case KERN_UNOPENEVT_PROC:
2352 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
2353 break;
2354
2355 default:
2356 error = EINVAL;
2357 }
2358 }
2359 return(error);
2360 }
2361
2362 SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2363 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2364
2365
2366
/*
 * Report and set the global nx_enabled flag.
 * Disabled entirely on SECURE_KERNEL builds; on x86 a write is rejected
 * when the CPU does not advertise the XD (execute-disable) feature.
 */
STATIC int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	(void)req;
	return ENOTSUP;
#else
	int new_value, changed;
	int error;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
	if (error)
		return error;

	if (changed) {
#if defined(__i386__) || defined(__x86_64__)
		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
			return ENOTSUP;
#endif
		/* NOTE(review): new_value is not range-checked here, unlike
		 * the coredump sysctls — any int is accepted. */
		nx_enabled = new_value;
	}
	return(error);
#endif /* SECURE_KERNEL */
}
2395
2396
2397
2398 SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
2399 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2400 0, 0, sysctl_nx, "I", "");
2401
2402 STATIC int
2403 sysctl_loadavg
2404 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2405 {
2406 if (proc_is64bit(req->p)) {
2407 struct user64_loadavg loadinfo64;
2408 fill_loadavg64(&averunnable, &loadinfo64);
2409 return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
2410 } else {
2411 struct user32_loadavg loadinfo32;
2412 fill_loadavg32(&averunnable, &loadinfo32);
2413 return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
2414 }
2415 }
2416
2417 SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
2418 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2419 0, 0, sysctl_loadavg, "S,loadavg", "");
2420
2421 /*
2422 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2423 */
2424 STATIC int
2425 sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
2426 __unused int arg2, struct sysctl_req *req)
2427 {
2428 int old_value=0, new_value=0, error=0;
2429
2430 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value ))
2431 return(error);
2432 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
2433 if (!error) {
2434 return (vm_toggle_entry_reuse(new_value, NULL));
2435 }
2436 return(error);
2437 }
2438
2439 SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse,"I","");
2440
2441
2442 STATIC int
2443 sysctl_swapusage
2444 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2445 {
2446 int error;
2447 uint64_t swap_total;
2448 uint64_t swap_avail;
2449 vm_size_t swap_pagesize;
2450 boolean_t swap_encrypted;
2451 struct xsw_usage xsu;
2452
2453 error = macx_swapinfo(&swap_total,
2454 &swap_avail,
2455 &swap_pagesize,
2456 &swap_encrypted);
2457 if (error)
2458 return error;
2459
2460 xsu.xsu_total = swap_total;
2461 xsu.xsu_avail = swap_avail;
2462 xsu.xsu_used = swap_total - swap_avail;
2463 xsu.xsu_pagesize = swap_pagesize;
2464 xsu.xsu_encrypted = swap_encrypted;
2465 return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
2466 }
2467
2468
2469
2470 SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
2471 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2472 0, 0, sysctl_swapusage, "S,xsw_usage", "");
2473
2474 #if CONFIG_FREEZE
2475 extern void vm_page_reactivate_all_throttled(void);
2476
2477 static int
2478 sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
2479 {
2480 #pragma unused(arg1, arg2)
2481 int error, val = memorystatus_freeze_enabled ? 1 : 0;
2482 boolean_t disabled;
2483
2484 error = sysctl_handle_int(oidp, &val, 0, req);
2485 if (error || !req->newptr)
2486 return (error);
2487
2488 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
2489 //assert(req->newptr);
2490 printf("Failed attempt to set vm.freeze_enabled sysctl\n");
2491 return EINVAL;
2492 }
2493
2494 /*
2495 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
2496 */
2497 disabled = (!val && memorystatus_freeze_enabled);
2498
2499 memorystatus_freeze_enabled = val ? TRUE : FALSE;
2500
2501 if (disabled) {
2502 vm_page_reactivate_all_throttled();
2503 }
2504
2505 return (0);
2506 }
2507
2508 SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
2509 #endif /* CONFIG_FREEZE */
2510
2511 /* this kernel does NOT implement shared_region_make_private_np() */
2512 SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
2513 CTLFLAG_RD | CTLFLAG_LOCKED,
2514 (int *)NULL, 0, "");
2515
2516 STATIC int
2517 fetch_process_cputype(
2518 proc_t cur_proc,
2519 int *name,
2520 u_int namelen,
2521 cpu_type_t *cputype)
2522 {
2523 proc_t p = PROC_NULL;
2524 int refheld = 0;
2525 cpu_type_t ret = 0;
2526 int error = 0;
2527
2528 if (namelen == 0)
2529 p = cur_proc;
2530 else if (namelen == 1) {
2531 p = proc_find(name[0]);
2532 if (p == NULL)
2533 return (EINVAL);
2534 refheld = 1;
2535 } else {
2536 error = EINVAL;
2537 goto out;
2538 }
2539
2540 ret = cpu_type() & ~CPU_ARCH_MASK;
2541 if (IS_64BIT_PROCESS(p))
2542 ret |= CPU_ARCH_ABI64;
2543
2544 *cputype = ret;
2545
2546 if (refheld != 0)
2547 proc_rele(p);
2548 out:
2549 return (error);
2550 }
2551
2552 STATIC int
2553 sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2554 struct sysctl_req *req)
2555 {
2556 int error;
2557 cpu_type_t proc_cputype = 0;
2558 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2559 return error;
2560 int res = 1;
2561 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
2562 res = 0;
2563 return SYSCTL_OUT(req, &res, sizeof(res));
2564 }
2565 SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native ,"I","proc_native");
2566
2567 STATIC int
2568 sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2569 struct sysctl_req *req)
2570 {
2571 int error;
2572 cpu_type_t proc_cputype = 0;
2573 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2574 return error;
2575 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
2576 }
2577 SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
2578
2579 STATIC int
2580 sysctl_safeboot
2581 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2582 {
2583 return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
2584 }
2585
2586 SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
2587 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2588 0, 0, sysctl_safeboot, "I", "");
2589
2590 STATIC int
2591 sysctl_singleuser
2592 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2593 {
2594 return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
2595 }
2596
2597 SYSCTL_PROC(_kern, OID_AUTO, singleuser,
2598 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2599 0, 0, sysctl_singleuser, "I", "");
2600
2601 STATIC int sysctl_minimalboot
2602 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2603 {
2604 return sysctl_io_number(req, minimalboot, sizeof(int), NULL, NULL);
2605 }
2606
2607 SYSCTL_PROC(_kern, OID_AUTO, minimalboot,
2608 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2609 0, 0, sysctl_minimalboot, "I", "");
2610
2611 /*
2612 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
2613 */
2614 extern boolean_t affinity_sets_enabled;
2615 extern int affinity_sets_mapping;
2616
2617 SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
2618 CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
2619 SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
2620 CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
2621
2622 /*
2623 * Boolean indicating if KASLR is active.
2624 */
2625 STATIC int
2626 sysctl_slide
2627 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2628 {
2629 uint32_t slide;
2630
2631 slide = vm_kernel_slide ? 1 : 0;
2632
2633 return sysctl_io_number( req, slide, sizeof(int), NULL, NULL);
2634 }
2635
2636 SYSCTL_PROC(_kern, OID_AUTO, slide,
2637 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2638 0, 0, sysctl_slide, "I", "");
2639
2640 /*
2641 * Limit on total memory users can wire.
2642 *
2643 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
2644 *
2645 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
2646 *
2647 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
2648 * kmem_init().
2649 *
2650 * All values are in bytes.
2651 */
2652
2653 vm_map_size_t vm_global_no_user_wire_amount;
2654 vm_map_size_t vm_global_user_wire_limit;
2655 vm_map_size_t vm_user_wire_limit;
2656
2657 /*
2658 * There needs to be a more automatic/elegant way to do this
2659 */
2660 SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
2661 SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
2662 SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
2663
2664 extern int vm_map_copy_overwrite_aligned_src_not_internal;
2665 extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
2666 extern int vm_map_copy_overwrite_aligned_src_large;
2667 SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
2668 SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
2669 SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");
2670
2671
2672 extern uint32_t vm_page_external_count;
2673 extern uint32_t vm_page_filecache_min;
2674
2675 SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");
2676 SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_filecache_min, 0, "");
2677
2678 extern int vm_compressor_mode;
2679 extern int vm_compressor_is_active;
2680 extern int vm_compressor_available;
2681 extern uint32_t vm_ripe_target_age;
2682 extern uint32_t swapout_target_age;
2683 extern int64_t compressor_bytes_used;
2684 extern int64_t c_segment_input_bytes;
2685 extern int64_t c_segment_compressed_bytes;
2686 extern uint32_t compressor_eval_period_in_msecs;
2687 extern uint32_t compressor_sample_min_in_msecs;
2688 extern uint32_t compressor_sample_max_in_msecs;
2689 extern uint32_t compressor_thrashing_threshold_per_10msecs;
2690 extern uint32_t compressor_thrashing_min_per_10msecs;
2691 extern uint32_t vm_compressor_minorcompact_threshold_divisor;
2692 extern uint32_t vm_compressor_majorcompact_threshold_divisor;
2693 extern uint32_t vm_compressor_unthrottle_threshold_divisor;
2694 extern uint32_t vm_compressor_catchup_threshold_divisor;
2695 extern uint32_t vm_compressor_time_thread;
2696 extern uint64_t vm_compressor_thread_runtime;
2697
2698 SYSCTL_QUAD(_vm, OID_AUTO, compressor_input_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_input_bytes, "");
2699 SYSCTL_QUAD(_vm, OID_AUTO, compressor_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_compressed_bytes, "");
2700 SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");
2701
2702 SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
2703 SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
2704 SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
2705 SYSCTL_INT(_vm, OID_AUTO, compressor_available, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_available, 0, "");
2706
2707 SYSCTL_INT(_vm, OID_AUTO, vm_ripe_target_age_in_secs, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ripe_target_age, 0, "");
2708
2709 SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
2710 SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
2711 SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
2712 SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
2713 SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");
2714 SYSCTL_INT(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_minorcompact_threshold_divisor, 0, "");
2715 SYSCTL_INT(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_majorcompact_threshold_divisor, 0, "");
2716 SYSCTL_INT(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_unthrottle_threshold_divisor, 0, "");
2717 SYSCTL_INT(_vm, OID_AUTO, compressor_catchup_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_catchup_threshold_divisor, 0, "");
2718
2719 SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");
2720
2721 SYSCTL_INT(_vm, OID_AUTO, compressor_timing_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_time_thread, 0, "");
2722 SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_thread_runtime, "");
2723
2724 SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressions, "");
2725 SYSCTL_QUAD(_vm, OID_AUTO, lz4_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compression_failures, "");
2726 SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressed_bytes, "");
2727 SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_delta, "");
2728 SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_negative_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_negative_delta, "");
2729
2730 SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressions, "");
2731 SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressed_bytes, "");
2732
2733 SYSCTL_QUAD(_vm, OID_AUTO, uc_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.uc_decompressions, "");
2734
2735 SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions, "");
2736 SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions_exclusive, "");
2737 SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_compressions, "");
2738 SYSCTL_QUAD(_vm, OID_AUTO, wk_mzv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_mzv_compressions, "");
2739 SYSCTL_QUAD(_vm, OID_AUTO, wk_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compression_failures, "");
2740 SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_exclusive, "");
2741 SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_total, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_total, "");
2742
2743 SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressions, "");
2744 SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressed_bytes, "");
2745 SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_decompressions, "");
2746
2747 SYSCTL_INT(_vm, OID_AUTO, lz4_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_threshold, 0, "");
2748 SYSCTL_INT(_vm, OID_AUTO, wkdm_reeval_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.wkdm_reeval_threshold, 0, "");
2749 SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_skips, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_skips, 0, "");
2750 SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_run_length, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_run_length, 0, "");
2751 SYSCTL_INT(_vm, OID_AUTO, lz4_max_preselects, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_preselects, 0, "");
2752 SYSCTL_INT(_vm, OID_AUTO, lz4_run_preselection_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_preselection_threshold, 0, "");
2753 SYSCTL_INT(_vm, OID_AUTO, lz4_run_continue_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_continue_bytes, 0, "");
2754 SYSCTL_INT(_vm, OID_AUTO, lz4_profitable_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_profitable_bytes, 0, "");
2755
2756 #if CONFIG_PHANTOM_CACHE
2757 extern uint32_t phantom_cache_thrashing_threshold;
2758 extern uint32_t phantom_cache_eval_period_in_msecs;
2759 extern uint32_t phantom_cache_thrashing_threshold_ssd;
2760
2761
2762 SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
2763 SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
2764 SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
2765 #endif
2766
#if CONFIG_BACKGROUND_QUEUE

/* Background page-queue state exported from the VM pageout code. */
extern uint32_t	vm_page_background_count;
extern uint32_t	vm_page_background_limit;
extern uint32_t	vm_page_background_target;
extern uint32_t	vm_page_background_internal_count;
extern uint32_t	vm_page_background_external_count;
extern uint32_t	vm_page_background_mode;
extern uint32_t	vm_page_background_exclude_external;
extern uint64_t	vm_page_background_promoted_count;
extern uint64_t vm_pageout_considered_bq_internal;
extern uint64_t vm_pageout_considered_bq_external;
extern uint64_t vm_pageout_rejected_bq_internal;
extern uint64_t vm_pageout_rejected_bq_external;

/*
 * Policy knobs are RW; the counters below them are RD.
 * NOTE(review): the backing variables are uint32_t but are registered via
 * SYSCTL_INT; both are 32 bits wide, so the value round-trips unchanged.
 */
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_limit, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");

/* 64-bit counters use SYSCTL_QUAD (read-only). */
SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_external, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");

#endif
2797
#if (DEVELOPMENT || DEBUG)

/* VM page-creation throttle counters (read-only, debug builds only). */
SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_creation_throttled_hard, 0, "");

SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_creation_throttled_soft, 0, "");

/* memorystatus feedback factors used by the pageout path (tunable). */
extern uint32_t vm_pageout_memorystatus_fb_factor_nr;
extern uint32_t vm_pageout_memorystatus_fb_factor_dr;
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_nr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_nr, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_dr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_dr, 0, "");

extern uint32_t vm_grab_anon_overrides;
extern uint32_t vm_grab_anon_nops;

SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_overrides, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_nops, 0, "");

/* log message counters for persistence mode */
extern uint32_t oslog_p_total_msgcount;
extern uint32_t oslog_p_metadata_saved_msgcount;
extern uint32_t oslog_p_metadata_dropped_msgcount;
extern uint32_t oslog_p_error_count;
extern uint32_t oslog_p_saved_msgcount;
extern uint32_t oslog_p_dropped_msgcount;
extern uint32_t oslog_p_boot_dropped_msgcount;

/* log message counters for streaming mode */
extern uint32_t oslog_s_total_msgcount;
extern uint32_t oslog_s_metadata_msgcount;
extern uint32_t oslog_s_error_count;
extern uint32_t oslog_s_streamed_msgcount;
extern uint32_t oslog_s_dropped_msgcount;

/* CTLFLAG_ANYBODY: readable by unprivileged processes (all are read-only). */
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_saved_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_dropped_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_p_boot_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_boot_dropped_msgcount, 0, "");

SYSCTL_UINT(_debug, OID_AUTO, oslog_s_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_total_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_metadata_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_metadata_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_error_count, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_streamed_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_streamed_msgcount, 0, "");
SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_dropped_msgcount, 0, "");


#endif /* DEVELOPMENT || DEBUG */
2851
/*
 * Enable tracing of voucher contents
 */
extern uint32_t ipc_voucher_trace_contents;

SYSCTL_INT (_kern, OID_AUTO, ipc_voucher_trace_contents,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");

/*
 * Kernel stack size and depth
 *
 * NOTE(review): the backing variables are not int; the (int *) casts assume
 * they are 32 bits wide — confirm against their declarations before changing.
 */
SYSCTL_INT (_kern, OID_AUTO, stack_size,
    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");

/*
 * enable back trace for port allocations
 */
extern int ipc_portbt;

SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &ipc_portbt, 0, "");

/*
 * Scheduler sysctls
 */

/* Read-only name of the active timeshare scheduler implementation. */
SYSCTL_STRING(_kern, OID_AUTO, sched,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    sched_string, sizeof(sched_string),
    "Timeshare scheduler implementation");
2885
2886 /*
2887 * Only support runtime modification on embedded platforms
2888 * with development config enabled
2889 */
2890
2891
2892 /* Parameters related to timer coalescing tuning, to be replaced
2893 * with a dedicated systemcall in the future.
2894 */
2895 /* Enable processing pending timers in the context of any other interrupt
2896 * Coalescing tuning parameters for various thread/task attributes */
2897 STATIC int
2898 sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
2899 {
2900 #pragma unused(oidp)
2901 int size = arg2; /* subcommand*/
2902 int error;
2903 int changed = 0;
2904 uint64_t old_value_ns;
2905 uint64_t new_value_ns;
2906 uint64_t value_abstime;
2907 if (size == sizeof(uint32_t))
2908 value_abstime = *((uint32_t *)arg1);
2909 else if (size == sizeof(uint64_t))
2910 value_abstime = *((uint64_t *)arg1);
2911 else return ENOTSUP;
2912
2913 absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
2914 error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
2915 if ((error) || (!changed))
2916 return error;
2917
2918 nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
2919 if (size == sizeof(uint32_t))
2920 *((uint32_t *)arg1) = (uint32_t)value_abstime;
2921 else
2922 *((uint64_t *)arg1) = value_abstime;
2923 return error;
2924 }
2925
/*
 * Timer-coalescing tuning knobs.  Each class exposes a pair of controls:
 *   *_scale  — a shift applied when computing the coalescing window
 *              (plain SYSCTL_INT on the *_shift field), and
 *   *_ns_max — the window's upper bound, stored internally in abstime but
 *              read/written in nanoseconds via
 *              sysctl_timer_user_us_kernel_abstime.
 */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_resort_threshold_abstime,
    sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");
SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_bg_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

/* Kernel-thread timer coalescing. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_kt_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_kt_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

/* Fixed-priority thread timer coalescing. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_fp_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_fp_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

/* Timeshare thread timer coalescing. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_ts_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_ts_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

/* Latency-QoS tiers 0-5: per-tier scale and window maximum. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[0], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[0],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[1], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[1],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[2], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[2],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[3], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[3],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[4], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[4],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[5], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[5],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");
3040
3041 /* Communicate the "user idle level" heuristic to the timer layer, and
3042 * potentially other layers in the future.
3043 */
3044
3045 static int
3046 timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
3047 int new_value = 0, old_value = 0, changed = 0, error;
3048
3049 old_value = timer_get_user_idle_level();
3050
3051 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
3052
3053 if (error == 0 && changed) {
3054 if (timer_set_user_idle_level(new_value) != KERN_SUCCESS)
3055 error = ERANGE;
3056 }
3057
3058 return error;
3059 }
3060
SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    timer_user_idle_level, "I", "User idle level heuristic, 0-128");

#if HYPERVISOR
/* Read-only: whether hypervisor support (Hypervisor.framework) is available. */
SYSCTL_INT(_kern, OID_AUTO, hv_support,
    CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
    &hv_support_available, 0, "");
#endif


/*
 * This is set by core audio to tell tailspin (ie background tracing) how long
 * its smallest buffer is. Background tracing can then try to make a reasonable
 * decisions to try to avoid introducing so much latency that the buffers will
 * underflow.
 */

int min_audio_buffer_usec;
3081
3082 STATIC int
3083 sysctl_audio_buffer SYSCTL_HANDLER_ARGS
3084 {
3085 #pragma unused(oidp, arg1, arg2)
3086 int err = 0, value = 0, changed = 0;
3087 err = sysctl_io_number(req, min_audio_buffer_usec, sizeof(int), &value, &changed);
3088 if (err) goto exit;
3089
3090 if (changed) {
3091 /* writing is protected by an entitlement */
3092 if (priv_check_cred(kauth_cred_get(), PRIV_AUDIO_LATENCY, 0) != 0) {
3093 err = EPERM;
3094 goto exit;
3095 }
3096 min_audio_buffer_usec = value;
3097 }
3098 exit:
3099 return err;
3100 }
3101
/* NOTE(review): registered without CTLFLAG_LOCKED, unlike most OIDs here — confirm intentional. */
SYSCTL_PROC(_kern, OID_AUTO, min_audio_buffer_usec, CTLFLAG_RW | CTLFLAG_ANYBODY, 0, 0, sysctl_audio_buffer, "I", "Minimum audio buffer size, in microseconds");

#if DEVELOPMENT || DEBUG
#include <sys/sysent.h>
/* This should result in a fatal exception, verifying that "sysent" is
 * write-protected.
 */
/*
 * Deliberate fault-injection handler (debug builds only): on any write,
 * attempt to store through a pointer into sysent[].  If the syscall table
 * is correctly mapped read-only this faults; if the write succeeds, the
 * printf below flags the failure of the protection.
 */
static int
kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
	uint64_t new_value = 0, old_value = 0;
	int changed = 0, error;

	error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
	if ((error == 0) && changed) {
		/* volatile forces the store to be emitted rather than optimized away */
		volatile uint32_t *wraddr = (uint32_t *) &sysent[0];
		*wraddr = 0;
		printf("sysent[0] write succeeded\n");
	}
	return error;
}
3122
/* NOTE(review): registered as CTLTYPE_QUAD but with "I" (int) format — confirm. */
SYSCTL_PROC(_kern, OID_AUTO, sysent_const_check,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    kern_sysent_write, "I", "Attempt sysent[0] write");

#endif

/* kern.development: constant 1 on DEVELOPMENT/DEBUG kernels, 0 on RELEASE. */
#if DEVELOPMENT || DEBUG
SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 1, "");
#else
SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 0, "");
#endif