]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_sysctl.c
8239a10fd792eea1060b071d99e3bb42c22b025c
[apple/xnu.git] / bsd / kern / kern_sysctl.c
1 /*
2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
66 /*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
72
73 /*
74 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
84 */
85
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/proc_internal.h>
91 #include <sys/kauth.h>
92 #include <sys/file_internal.h>
93 #include <sys/vnode_internal.h>
94 #include <sys/unistd.h>
95 #include <sys/buf.h>
96 #include <sys/ioctl.h>
97 #include <sys/namei.h>
98 #include <sys/tty.h>
99 #include <sys/disklabel.h>
100 #include <sys/vm.h>
101 #include <sys/sysctl.h>
102 #include <sys/user.h>
103 #include <sys/aio_kern.h>
104 #include <sys/reboot.h>
105 #include <sys/memory_maintenance.h>
106 #include <sys/priv.h>
107
108 #include <security/audit/audit.h>
109 #include <kern/kalloc.h>
110
111 #include <mach/machine.h>
112 #include <mach/mach_host.h>
113 #include <mach/mach_types.h>
114 #include <mach/vm_param.h>
115 #include <kern/mach_param.h>
116 #include <kern/task.h>
117 #include <kern/thread.h>
118 #include <kern/processor.h>
119 #include <kern/debug.h>
120 #include <kern/sched_prim.h>
121 #include <vm/vm_kern.h>
122 #include <vm/vm_map.h>
123 #include <mach/host_info.h>
124
125 #include <sys/mount_internal.h>
126 #include <sys/kdebug.h>
127
128 #include <IOKit/IOPlatformExpert.h>
129 #include <pexpert/pexpert.h>
130
131 #include <machine/machine_routines.h>
132 #include <machine/exec.h>
133
134 #include <vm/vm_protos.h>
135 #include <vm/vm_pageout.h>
136 #include <sys/imgsrc.h>
137 #include <kern/timer_call.h>
138
139 #if defined(__i386__) || defined(__x86_64__)
140 #include <i386/cpuid.h>
141 #endif
142
143 #if CONFIG_FREEZE
144 #include <sys/kern_memorystatus.h>
145 #endif
146
147 #if KPERF
148 #include <kperf/kperf.h>
149 #endif
150
151 #if HYPERVISOR
152 #include <kern/hv_support.h>
153 #endif
154
155 /*
156 * deliberately setting max requests to really high number
157 * so that runaway settings do not cause MALLOC overflows
158 */
159 #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
160
161 extern int aio_max_requests;
162 extern int aio_max_requests_per_process;
163 extern int aio_worker_threads;
164 extern int lowpri_IO_window_msecs;
165 extern int lowpri_IO_delay_msecs;
166 extern int nx_enabled;
167 extern int speculative_reads_disabled;
168 extern int ignore_is_ssd;
169 extern unsigned int speculative_prefetch_max;
170 extern unsigned int speculative_prefetch_max_iosize;
171 extern unsigned int preheat_max_bytes;
172 extern unsigned int preheat_min_bytes;
173 extern long numvnodes;
174
175 extern uuid_string_t bootsessionuuid_string;
176
177 extern unsigned int vm_max_delayed_work_limit;
178 extern unsigned int vm_max_batch;
179
180 extern unsigned int vm_page_free_min;
181 extern unsigned int vm_page_free_target;
182 extern unsigned int vm_page_free_reserved;
183 extern unsigned int vm_page_speculative_percentage;
184 extern unsigned int vm_page_speculative_q_age_ms;
185
186 #if (DEVELOPMENT || DEBUG)
187 extern uint32_t vm_page_creation_throttled_hard;
188 extern uint32_t vm_page_creation_throttled_soft;
189 #endif /* DEVELOPMENT || DEBUG */
190
191 /*
192 * Conditionally allow dtrace to see these functions for debugging purposes.
193 */
194 #ifdef STATIC
195 #undef STATIC
196 #endif
197 #if 0
198 #define STATIC
199 #else
200 #define STATIC static
201 #endif
202
203 extern boolean_t mach_timer_coalescing_enabled;
204
205 extern uint64_t timer_deadline_tracking_bin_1, timer_deadline_tracking_bin_2;
206
207 STATIC void
208 fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
209 STATIC void
210 fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
211 STATIC void
212 fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
213 STATIC void
214 fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
215 STATIC void
216 fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
217 STATIC void
218 fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);
219
220 extern int
221 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
222 #if NFSCLIENT
223 extern int
224 netboot_root(void);
225 #endif
226 int
227 pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
228 proc_t p);
229 __private_extern__ kern_return_t
230 reset_vmobjectcache(unsigned int val1, unsigned int val2);
231 int
232 sysctl_procargs(int *name, u_int namelen, user_addr_t where,
233 size_t *sizep, proc_t cur_proc);
234 STATIC int
235 sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
236 proc_t cur_proc, int argc_yes);
237 int
238 sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
239 size_t newlen, void *sp, int len);
240
241 STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
242 STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
243 STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
244 STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
245 STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
246 int sysdoproc_callback(proc_t p, void *arg);
247
248
249 /* forward declarations for non-static STATIC */
250 STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
251 STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
252 STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
253 STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
254 STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
255 STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
256 #if COUNT_SYSCALLS
257 STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
258 #endif /* COUNT_SYSCALLS */
259 STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
260 STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
261 STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
262 STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
263 STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
264 STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
265 STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
266 STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
267 STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
268 STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
269 STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
270 STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
271 STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
272 STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
273 STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
274 STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
275 #if NFSCLIENT
276 STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
277 #endif
278 #ifdef CONFIG_IMGSRC_ACCESS
279 STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
280 #endif
281 STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
282 STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
283 STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
284 STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
285 STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
286 STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
287 STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
288 STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
289 STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
290 STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
291 STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
292 STATIC int fetch_process_cputype( proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
293 STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
294 STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
295 STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
296 STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
297 STATIC int sysctl_minimalboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
298 STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
299
300
301 extern void IORegistrySetOSBuildVersion(char * build_version);
302
303 STATIC void
304 fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
305 {
306 la64->ldavg[0] = la->ldavg[0];
307 la64->ldavg[1] = la->ldavg[1];
308 la64->ldavg[2] = la->ldavg[2];
309 la64->fscale = (user64_long_t)la->fscale;
310 }
311
312 STATIC void
313 fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
314 {
315 la32->ldavg[0] = la->ldavg[0];
316 la32->ldavg[1] = la->ldavg[1];
317 la32->ldavg[2] = la->ldavg[2];
318 la32->fscale = (user32_long_t)la->fscale;
319 }
320
321 /*
322 * Attributes stored in the kernel.
323 */
324 extern char corefilename[MAXPATHLEN+1];
325 extern int do_coredump;
326 extern int sugid_coredump;
327
328 #if COUNT_SYSCALLS
329 extern int do_count_syscalls;
330 #endif
331
332 #ifdef INSECURE
333 int securelevel = -1;
334 #else
335 int securelevel;
336 #endif
337
/*
 * sysctl kern.threadname: get or set the name of the calling thread.
 *
 * Reads return the current pthread name without the NUL terminator
 * (strlen-style length in req->oldidx); writes replace the name.
 * Operates only on the current uthread's pth_name, so no locking is
 * taken here — NOTE(review): assumes pth_name is only ever touched by
 * its own thread; confirm no cross-thread readers exist.
 */
STATIC int
sysctl_handle_kern_threadname(	__unused struct sysctl_oid *oidp, __unused void *arg1,
	__unused int arg2, struct sysctl_req *req)
{
	int error;
	struct uthread *ut = get_bsdthread_info(current_thread());
	user_addr_t oldp=0, newp=0;
	size_t *oldlenp=NULL;
	size_t newlen=0;

	oldp = req->oldptr;
	oldlenp = &(req->oldlen);
	newp = req->newptr;
	newlen = req->newlen;

	/* We want the current length, and maybe the string itself */
	if(oldlenp) {
		/* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
		size_t currlen = MAXTHREADNAMESIZE - 1;

		if(ut->pth_name)
			/* use length of current thread name */
			currlen = strlen(ut->pth_name);
		if(oldp) {
			/* caller's buffer must hold the whole name */
			if(*oldlenp < currlen)
				return ENOMEM;
			/* NOTE - we do not copy the NULL terminator */
			if(ut->pth_name) {
				error = copyout(ut->pth_name,oldp,currlen);
				if(error)
					return error;
			}
		}
		/* return length of thread name minus NULL terminator (just like strlen) */
		req->oldidx = currlen;
	}

	/* We want to set the name to something */
	if(newp)
	{
		/* reserve one byte for the NUL terminator */
		if(newlen > (MAXTHREADNAMESIZE - 1))
			return ENAMETOOLONG;
		if(!ut->pth_name)
		{
			/* first name set on this thread: allocate the buffer lazily */
			ut->pth_name = (char*)kalloc( MAXTHREADNAMESIZE );
			if(!ut->pth_name)
				return ENOMEM;
		}
		/* zero-fill first so the copied-in name is always NUL terminated */
		bzero(ut->pth_name, MAXTHREADNAMESIZE);
		error = copyin(newp, ut->pth_name, newlen);
		if(error)
			return error;
	}

	return 0;
}
394
395 SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname,"A","");
396
397 #define BSD_HOST 1
398 STATIC int
399 sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
400 {
401 host_basic_info_data_t hinfo;
402 kern_return_t kret;
403 uint32_t size;
404 int changed;
405 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
406 struct _processor_statistics_np *buf;
407 int error;
408
409 kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
410 if (kret != KERN_SUCCESS) {
411 return EINVAL;
412 }
413
414 size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */
415
416 if (req->oldlen < size) {
417 return EINVAL;
418 }
419
420 MALLOC(buf, struct _processor_statistics_np*, size, M_TEMP, M_ZERO | M_WAITOK);
421
422 kret = get_sched_statistics(buf, &size);
423 if (kret != KERN_SUCCESS) {
424 error = EINVAL;
425 goto out;
426 }
427
428 error = sysctl_io_opaque(req, buf, size, &changed);
429 if (error) {
430 goto out;
431 }
432
433 if (changed) {
434 panic("Sched info changed?!");
435 }
436 out:
437 FREE(buf, M_TEMP);
438 return error;
439 }
440
441 SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");
442
443 STATIC int
444 sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
445 {
446 boolean_t active;
447 int res;
448
449 if (req->newlen != sizeof(active)) {
450 return EINVAL;
451 }
452
453 res = copyin(req->newptr, &active, sizeof(active));
454 if (res != 0) {
455 return res;
456 }
457
458 return set_sched_stats_active(active);
459 }
460
461 SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
462
463 extern uint32_t sched_debug_flags;
464 SYSCTL_INT(_debug, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_debug_flags, 0, "scheduler debug");
465
466 #if (DEBUG || DEVELOPMENT)
467 extern boolean_t doprnt_hide_pointers;
468 SYSCTL_INT(_debug, OID_AUTO, hide_kernel_pointers, CTLFLAG_RW | CTLFLAG_LOCKED, &doprnt_hide_pointers, 0, "hide kernel pointers from log");
469 #endif
470
471 extern int get_kernel_symfile(proc_t, char **);
472
473 #if COUNT_SYSCALLS
474 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
475
476 extern int nsysent;
477 extern int syscalls_log[];
478 extern const char *syscallnames[];
479
480 STATIC int
481 sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
482 {
483 __unused int cmd = oidp->oid_arg2; /* subcommand*/
484 __unused int *name = arg1; /* oid element argument vector */
485 __unused int namelen = arg2; /* number of oid element arguments */
486 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
487 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
488 user_addr_t newp = req->newptr; /* user buffer copy in address */
489 size_t newlen = req->newlen; /* user buffer copy in size */
490 int error;
491
492 int tmp;
493
494 /* valid values passed in:
495 * = 0 means don't keep called counts for each bsd syscall
496 * > 0 means keep called counts for each bsd syscall
497 * = 2 means dump current counts to the system log
498 * = 3 means reset all counts
499 * for example, to dump current counts:
500 * sysctl -w kern.count_calls=2
501 */
502 error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
503 if ( error != 0 ) {
504 return (error);
505 }
506
507 if ( tmp == 1 ) {
508 do_count_syscalls = 1;
509 }
510 else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
511 int i;
512 for ( i = 0; i < nsysent; i++ ) {
513 if ( syscalls_log[i] != 0 ) {
514 if ( tmp == 2 ) {
515 printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
516 }
517 else {
518 syscalls_log[i] = 0;
519 }
520 }
521 }
522 if ( tmp != 0 ) {
523 do_count_syscalls = 1;
524 }
525 }
526
527 /* adjust index so we return the right required/consumed amount */
528 if (!error)
529 req->oldidx += req->oldlen;
530
531 return (error);
532 }
533 SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
534 0, /* Pointer argument (arg1) */
535 0, /* Integer argument (arg2) */
536 sysctl_docountsyscalls, /* Handler function */
537 NULL, /* Data pointer */
538 "");
539 #endif /* COUNT_SYSCALLS */
540
541 /*
542 * The following sysctl_* functions should not be used
543 * any more, as they can only cope with callers in
544 * user mode: Use new-style
545 * sysctl_io_number()
546 * sysctl_io_string()
547 * sysctl_io_opaque()
548 * instead.
549 */
550
551 /*
552 * Validate parameters and get old / set new parameters
553 * for an integer-valued sysctl function.
554 */
555 int
556 sysctl_int(user_addr_t oldp, size_t *oldlenp,
557 user_addr_t newp, size_t newlen, int *valp)
558 {
559 int error = 0;
560
561 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
562 return (EFAULT);
563 if (oldp && *oldlenp < sizeof(int))
564 return (ENOMEM);
565 if (newp && newlen != sizeof(int))
566 return (EINVAL);
567 *oldlenp = sizeof(int);
568 if (oldp)
569 error = copyout(valp, oldp, sizeof(int));
570 if (error == 0 && newp) {
571 error = copyin(newp, valp, sizeof(int));
572 AUDIT_ARG(value32, *valp);
573 }
574 return (error);
575 }
576
577 /*
578 * Validate parameters and get old / set new parameters
579 * for an quad(64bit)-valued sysctl function.
580 */
581 int
582 sysctl_quad(user_addr_t oldp, size_t *oldlenp,
583 user_addr_t newp, size_t newlen, quad_t *valp)
584 {
585 int error = 0;
586
587 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
588 return (EFAULT);
589 if (oldp && *oldlenp < sizeof(quad_t))
590 return (ENOMEM);
591 if (newp && newlen != sizeof(quad_t))
592 return (EINVAL);
593 *oldlenp = sizeof(quad_t);
594 if (oldp)
595 error = copyout(valp, oldp, sizeof(quad_t));
596 if (error == 0 && newp)
597 error = copyin(newp, valp, sizeof(quad_t));
598 return (error);
599 }
600
601 STATIC int
602 sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
603 {
604 if (p->p_pid != (pid_t)*(int*)arg)
605 return(0);
606 else
607 return(1);
608 }
609
610 STATIC int
611 sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
612 {
613 if (p->p_pgrpid != (pid_t)*(int*)arg)
614 return(0);
615 else
616 return(1);
617 }
618
619 STATIC int
620 sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
621 {
622 int retval;
623 struct tty *tp;
624
625 /* This is very racy but list lock is held.. Hmmm. */
626 if ((p->p_flag & P_CONTROLT) == 0 ||
627 (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
628 (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
629 tp->t_dev != (dev_t)*(int*)arg)
630 retval = 0;
631 else
632 retval = 1;
633
634 return(retval);
635 }
636
637 STATIC int
638 sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
639 {
640 kauth_cred_t my_cred;
641 uid_t uid;
642
643 if (p->p_ucred == NULL)
644 return(0);
645 my_cred = kauth_cred_proc_ref(p);
646 uid = kauth_cred_getuid(my_cred);
647 kauth_cred_unref(&my_cred);
648
649 if (uid != (uid_t)*(int*)arg)
650 return(0);
651 else
652 return(1);
653 }
654
655
656 STATIC int
657 sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
658 {
659 kauth_cred_t my_cred;
660 uid_t ruid;
661
662 if (p->p_ucred == NULL)
663 return(0);
664 my_cred = kauth_cred_proc_ref(p);
665 ruid = kauth_cred_getruid(my_cred);
666 kauth_cred_unref(&my_cred);
667
668 if (ruid != (uid_t)*(int*)arg)
669 return(0);
670 else
671 return(1);
672 }
673
674 /*
675 * try over estimating by 5 procs
676 */
#define KERN_PROCSLOP	(5 * sizeof (struct kinfo_proc))
/*
 * Shared state threaded through sysdoproc_callback() by
 * sysctl_prochandle() while proc_iterate() walks the proc lists.
 */
struct sysdoproc_args {
	int	buflen;		/* remaining bytes in the user buffer */
	void	*kprocp;	/* scratch kinfo_proc (32- or 64-bit layout) */
	boolean_t is_64_bit;	/* caller is a 64-bit process */
	user_addr_t	dp;	/* current copyout position in the user buffer */
	size_t needed;		/* running total of bytes required for matches */
	int sizeof_kproc;	/* size of the record at *kprocp */
	int * errorp;		/* where the callback reports a copyout error */
	int uidcheck;		/* nonzero: filter by effective uid == uidval */
	int ruidcheck;		/* nonzero: filter by real uid == uidval */
	int ttycheck;		/* nonzero: filter by controlling tty dev == uidval */
	int uidval;		/* comparison value for the above filters */
};
691
692 int
693 sysdoproc_callback(proc_t p, void *arg)
694 {
695 struct sysdoproc_args *args = arg;
696
697 if (args->buflen >= args->sizeof_kproc) {
698 if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0))
699 return (PROC_RETURNED);
700 if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0))
701 return (PROC_RETURNED);
702 if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0))
703 return (PROC_RETURNED);
704
705 bzero(args->kprocp, args->sizeof_kproc);
706 if (args->is_64_bit)
707 fill_user64_proc(p, args->kprocp);
708 else
709 fill_user32_proc(p, args->kprocp);
710 int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
711 if (error) {
712 *args->errorp = error;
713 return (PROC_RETURNED_DONE);
714 }
715 args->dp += args->sizeof_kproc;
716 args->buflen -= args->sizeof_kproc;
717 }
718 args->needed += args->sizeof_kproc;
719 return (PROC_RETURNED);
720 }
721
722 SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
/*
 * Common handler for the kern.proc.* sysctls (all/pid/tty/pgrp/uid/ruid).
 *
 * The subcommand is taken from oidp->oid_arg2 (set per-OID in the
 * SYSCTL_PROC declarations below); for every subcommand except
 * KERN_PROC_ALL, name[0] carries the value to match (pid, pgrp id,
 * tty dev, uid, or ruid).  With a NULL output buffer, the required
 * size (padded by KERN_PROCSLOP) is reported in req->oldlen instead.
 */
STATIC int
sysctl_prochandle SYSCTL_HANDLER_ARGS
{
	int cmd = oidp->oid_arg2;	/* subcommand for multiple nodes */
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t where = req->oldptr;/* user buffer copy out address */

	user_addr_t dp = where;
	size_t needed = 0;
	int buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
	int error = 0;
	boolean_t is_64_bit = proc_is64bit(current_proc());
	struct user32_kinfo_proc  user32_kproc;
	struct user64_kinfo_proc  user_kproc;
	int sizeof_kproc;
	void *kprocp;
	int (*filterfn)(proc_t, void *) = 0;
	struct sysdoproc_args args;
	int uidcheck = 0;
	int ruidcheck = 0;
	int ttycheck = 0;

	/* exactly one argument expected, except KERN_PROC_ALL takes none */
	if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL))
		return (EINVAL);

	/* pick the record layout matching the caller's address-space size */
	if (is_64_bit) {
		sizeof_kproc = sizeof(user_kproc);
		kprocp = &user_kproc;
	} else {
		sizeof_kproc = sizeof(user32_kproc);
		kprocp = &user32_kproc;
	}

	/*
	 * pid/pgrp matches use a proc_iterate pre-filter; tty/uid/ruid
	 * are checked inside sysdoproc_callback (they need cred/tty refs).
	 */
	switch (cmd) {

	case KERN_PROC_PID:
		filterfn = sysdoproc_filt_KERN_PROC_PID;
		break;

	case KERN_PROC_PGRP:
		filterfn = sysdoproc_filt_KERN_PROC_PGRP;
		break;

	case KERN_PROC_TTY:
		ttycheck = 1;
		break;

	case KERN_PROC_UID:
		uidcheck = 1;
		break;

	case KERN_PROC_RUID:
		ruidcheck = 1;
		break;

	case KERN_PROC_ALL:
		break;

	default:
		/* must be kern.proc.<unknown> */
		return (ENOTSUP);
	}

	error = 0;
	args.buflen = buflen;
	args.kprocp = kprocp;
	args.is_64_bit = is_64_bit;
	args.dp = dp;
	args.needed = needed;
	args.errorp = &error;
	args.uidcheck = uidcheck;
	args.ruidcheck = ruidcheck;
	args.ttycheck = ttycheck;
	args.sizeof_kproc = sizeof_kproc;
	if (namelen)
		args.uidval = name[0];

	/* walk live and zombie lists; callback copies out matching records */
	proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
	    sysdoproc_callback, &args, filterfn, name);

	if (error)
		return (error);

	dp = args.dp;
	needed = args.needed;

	if (where != USER_ADDR_NULL) {
		req->oldlen = dp - where;
		if (needed > req->oldlen)
			return (ENOMEM);
	} else {
		/* sizing pass: over-estimate for procs created meanwhile */
		needed += KERN_PROCSLOP;
		req->oldlen = needed;
	}
	/* adjust index so we return the right required/consumed amount */
	req->oldidx += req->oldlen;
	return (0);
}
822
823 /*
824 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
825 * in the sysctl declaration itself, which comes into the handler function
826 * as 'oidp->oid_arg2'.
827 *
828 * For these particular sysctls, since they have well known OIDs, we could
829 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
830 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
831 * of a well known value with a common handler function. This is desirable,
832 * because we want well known values to "go away" at some future date.
833 *
834 * It should be noted that the value of '((int *)arg1)[1]' is used for many
835 * an integer parameter to the subcommand for many of these sysctls; we'd
836 * rather have used '((int *)arg1)[0]' for that, or even better, an element
 * in a structure passed in as the 'newp' argument to sysctlbyname(3),
838 * and then use leaf-node permissions enforcement, but that would have
839 * necessitated modifying user space code to correspond to the interface
840 * change, and we are striving for binary backward compatibility here; even
841 * though these are SPI, and not intended for use by user space applications
842 * which are not themselves system tools or libraries, some applications
843 * have erroneously used them.
844 */
/*
 * Registrations for the kern.proc.* nodes.  All share sysctl_prochandle;
 * arg2 carries the subcommand it switches on.
 */
SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	KERN_PROC_ALL,		/* Integer argument (arg2) */
	sysctl_prochandle,	/* Handler function */
	NULL,			/* Data is size variant on ILP32/LP64 */
	"");
SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	KERN_PROC_PID,		/* Integer argument (arg2) */
	sysctl_prochandle,	/* Handler function */
	NULL,			/* Data is size variant on ILP32/LP64 */
	"");
SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	KERN_PROC_TTY,		/* Integer argument (arg2) */
	sysctl_prochandle,	/* Handler function */
	NULL,			/* Data is size variant on ILP32/LP64 */
	"");
SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	KERN_PROC_PGRP,		/* Integer argument (arg2) */
	sysctl_prochandle,	/* Handler function */
	NULL,			/* Data is size variant on ILP32/LP64 */
	"");
SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	KERN_PROC_UID,		/* Integer argument (arg2) */
	sysctl_prochandle,	/* Handler function */
	NULL,			/* Data is size variant on ILP32/LP64 */
	"");
SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	KERN_PROC_RUID,		/* Integer argument (arg2) */
	sysctl_prochandle,	/* Handler function */
	NULL,			/* Data is size variant on ILP32/LP64 */
	"");
/*
 * KERN_PROC_LCID has no case in sysctl_prochandle's switch, so this
 * node falls through to the default and returns ENOTSUP; the OID is
 * kept registered for binary compatibility.
 */
SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	KERN_PROC_LCID,		/* Integer argument (arg2) */
	sysctl_prochandle,	/* Handler function */
	NULL,			/* Data is size variant on ILP32/LP64 */
	"");
887
888
889 /*
890 * Fill in non-zero fields of an eproc structure for the specified process.
891 */
/*
 * Populate the ILP32 eproc portion of a kinfo_proc for process p.
 * Caller is expected to have zeroed *ep (sysdoproc_callback does);
 * only fields with meaningful values are written here.
 */
STATIC void
fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
{
	struct tty *tp;
	struct pgrp *pg;
	struct session *sessp;
	kauth_cred_t my_cred;

	/* both refs are dropped at the bottom of this function */
	pg = proc_pgrp(p);
	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	}
	ep->e_ppid = p->p_ppid;
	if (p->p_ucred) {
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
		ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
		ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
		ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
		bcopy(posix_cred_get(my_cred)->cr_groups,
		    ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));

		kauth_cred_unref(&my_cred);
	}

	/* controlling terminal, if the session has one */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	    (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
	} else
		ep->e_tdev = NODEV;

	if (sessp != SESSION_NULL) {
		if (SESS_LEADER(p, sessp))
			ep->e_flag |= EPROC_SLEADER;
		session_rele(sessp);
	}
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
944
945 /*
946 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
947 */
948 STATIC void
949 fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
950 {
951 struct tty *tp;
952 struct pgrp *pg;
953 struct session *sessp;
954 kauth_cred_t my_cred;
955
956 pg = proc_pgrp(p);
957 sessp = proc_session(p);
958
959 if (pg != PGRP_NULL) {
960 ep->e_pgid = p->p_pgrpid;
961 ep->e_jobc = pg->pg_jobc;
962 if (sessp != SESSION_NULL && sessp->s_ttyvp)
963 ep->e_flag = EPROC_CTTY;
964 }
965 ep->e_ppid = p->p_ppid;
966 if (p->p_ucred) {
967 my_cred = kauth_cred_proc_ref(p);
968
969 /* A fake historical pcred */
970 ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
971 ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
972 ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
973 ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);
974
975 /* A fake historical *kauth_cred_t */
976 ep->e_ucred.cr_ref = my_cred->cr_ref;
977 ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
978 ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
979 bcopy(posix_cred_get(my_cred)->cr_groups,
980 ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));
981
982 kauth_cred_unref(&my_cred);
983 }
984
985 if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
986 (tp = SESSION_TP(sessp))) {
987 ep->e_tdev = tp->t_dev;
988 ep->e_tpgid = sessp->s_ttypgrpid;
989 } else
990 ep->e_tdev = NODEV;
991
992 if (sessp != SESSION_NULL) {
993 if (SESS_LEADER(p, sessp))
994 ep->e_flag |= EPROC_SLEADER;
995 session_rele(sessp);
996 }
997 if (pg != PGRP_NULL)
998 pg_rele(pg);
999 }
1000
1001 /*
1002 * Fill in an eproc structure for the specified process.
1003 * bzeroed by our caller, so only set non-zero fields.
1004 */
1005 STATIC void
1006 fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
1007 {
1008 exp->p_starttime.tv_sec = p->p_start.tv_sec;
1009 exp->p_starttime.tv_usec = p->p_start.tv_usec;
1010 exp->p_flag = p->p_flag;
1011 if (p->p_lflag & P_LTRACED)
1012 exp->p_flag |= P_TRACED;
1013 if (p->p_lflag & P_LPPWAIT)
1014 exp->p_flag |= P_PPWAIT;
1015 if (p->p_lflag & P_LEXIT)
1016 exp->p_flag |= P_WEXIT;
1017 exp->p_stat = p->p_stat;
1018 exp->p_pid = p->p_pid;
1019 exp->p_oppid = p->p_oppid;
1020 /* Mach related */
1021 exp->user_stack = p->user_stack;
1022 exp->p_debugger = p->p_debugger;
1023 exp->sigwait = p->sigwait;
1024 /* scheduling */
1025 #ifdef _PROC_HAS_SCHEDINFO_
1026 exp->p_estcpu = p->p_estcpu;
1027 exp->p_pctcpu = p->p_pctcpu;
1028 exp->p_slptime = p->p_slptime;
1029 #endif
1030 exp->p_realtimer.it_interval.tv_sec =
1031 (user32_time_t)p->p_realtimer.it_interval.tv_sec;
1032 exp->p_realtimer.it_interval.tv_usec =
1033 (__int32_t)p->p_realtimer.it_interval.tv_usec;
1034
1035 exp->p_realtimer.it_value.tv_sec =
1036 (user32_time_t)p->p_realtimer.it_value.tv_sec;
1037 exp->p_realtimer.it_value.tv_usec =
1038 (__int32_t)p->p_realtimer.it_value.tv_usec;
1039
1040 exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
1041 exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;
1042
1043 exp->p_sigignore = p->p_sigignore;
1044 exp->p_sigcatch = p->p_sigcatch;
1045 exp->p_priority = p->p_priority;
1046 exp->p_nice = p->p_nice;
1047 bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
1048 exp->p_xstat = p->p_xstat;
1049 exp->p_acflag = p->p_acflag;
1050 }
1051
1052 /*
1053 * Fill in an LP64 version of extern_proc structure for the specified process.
1054 */
1055 STATIC void
1056 fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
1057 {
1058 exp->p_starttime.tv_sec = p->p_start.tv_sec;
1059 exp->p_starttime.tv_usec = p->p_start.tv_usec;
1060 exp->p_flag = p->p_flag;
1061 if (p->p_lflag & P_LTRACED)
1062 exp->p_flag |= P_TRACED;
1063 if (p->p_lflag & P_LPPWAIT)
1064 exp->p_flag |= P_PPWAIT;
1065 if (p->p_lflag & P_LEXIT)
1066 exp->p_flag |= P_WEXIT;
1067 exp->p_stat = p->p_stat;
1068 exp->p_pid = p->p_pid;
1069 exp->p_oppid = p->p_oppid;
1070 /* Mach related */
1071 exp->user_stack = p->user_stack;
1072 exp->p_debugger = p->p_debugger;
1073 exp->sigwait = p->sigwait;
1074 /* scheduling */
1075 #ifdef _PROC_HAS_SCHEDINFO_
1076 exp->p_estcpu = p->p_estcpu;
1077 exp->p_pctcpu = p->p_pctcpu;
1078 exp->p_slptime = p->p_slptime;
1079 #endif
1080 exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
1081 exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;
1082
1083 exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
1084 exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;
1085
1086 exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
1087 exp->p_rtime.tv_usec = p->p_rtime.tv_usec;
1088
1089 exp->p_sigignore = p->p_sigignore;
1090 exp->p_sigcatch = p->p_sigcatch;
1091 exp->p_priority = p->p_priority;
1092 exp->p_nice = p->p_nice;
1093 bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
1094 exp->p_xstat = p->p_xstat;
1095 exp->p_acflag = p->p_acflag;
1096 }
1097
/*
 * Assemble a complete 32-bit kinfo_proc (extern_proc + eproc) for p.
 */
STATIC void
fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
{
	/* on a 64 bit kernel, 32 bit users get some truncated information */
	fill_user32_externproc(p, &kp->kp_proc);
	fill_user32_eproc(p, &kp->kp_eproc);
}
1105
/*
 * Assemble a complete LP64 kinfo_proc (extern_proc + eproc) for p.
 */
STATIC void
fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
{
	fill_user64_externproc(p, &kp->kp_proc);
	fill_user64_eproc(p, &kp->kp_eproc);
}
1112
/*
 * kern.kdebug: dispatch kdebug (trace facility) subcommands to
 * kdbg_control().  The caller must be superuser, or -- on KPERF
 * kernels -- a process blessed by kperf for trace access.  Note the
 * check ordering: suser() runs first and kperf_access_check() only
 * applies when suser() failed.
 */
STATIC int
sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
//	size_t newlen = req->newlen;	/* user buffer copy in size */

	proc_t p = current_proc();
	int ret=0;

	if (namelen == 0)
		return(ENOTSUP);

	ret = suser(kauth_cred_get(), &p->p_acflag);
#if KPERF
	/* Non-root processes may be blessed by kperf to access data
	 * logged into trace.
	 */
	if (ret)
		ret = kperf_access_check();
#endif /* KPERF */
	if (ret)
		return(ret);

	/* every recognized subcommand is handled inside kdbg_control() */
	switch(name[0]) {
	case KERN_KDEFLAGS:
	case KERN_KDDFLAGS:
	case KERN_KDENABLE:
	case KERN_KDGETBUF:
	case KERN_KDSETUP:
	case KERN_KDREMOVE:
	case KERN_KDSETREG:
	case KERN_KDGETREG:
	case KERN_KDREADTR:
	case KERN_KDWRITETR:
	case KERN_KDWRITEMAP:
	case KERN_KDPIDTR:
	case KERN_KDTHRMAP:
	case KERN_KDPIDEX:
	case KERN_KDSETRTCDEC:
	case KERN_KDSETBUF:
	case KERN_KDGETENTROPY:
	case KERN_KDENABLE_BG_TRACE:
	case KERN_KDDISABLE_BG_TRACE:
	case KERN_KDREADCURTHRMAP:
	case KERN_KDSET_TYPEFILTER:
	case KERN_KDBUFWAIT:
	case KERN_KDCPUMAP:
	case KERN_KDWAIT_BG_TRACE_RESET:
	case KERN_KDSET_BG_TYPEFILTER:
	case KERN_KDWRITEMAP_V3:
	case KERN_KDWRITETR_V3:
		ret = kdbg_control(name, namelen, oldp, oldlenp);
		break;
	default:
		ret= ENOTSUP;
		break;
	}

	/* adjust index so we return the right required/consumed amount */
	if (!ret)
		req->oldidx += req->oldlen;

	return (ret);
}
SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_kdebug_ops,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1188
1189
1190 /*
1191 * Return the top *sizep bytes of the user stack, or the entire area of the
1192 * user stack down through the saved exec_path, whichever is smaller.
1193 */
1194 STATIC int
1195 sysctl_doprocargs SYSCTL_HANDLER_ARGS
1196 {
1197 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1198 int *name = arg1; /* oid element argument vector */
1199 int namelen = arg2; /* number of oid element arguments */
1200 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1201 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1202 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1203 // size_t newlen = req->newlen; /* user buffer copy in size */
1204 int error;
1205
1206 error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 0);
1207
1208 /* adjust index so we return the right required/consumed amount */
1209 if (!error)
1210 req->oldidx += req->oldlen;
1211
1212 return (error);
1213 }
1214 SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
1215 0, /* Pointer argument (arg1) */
1216 0, /* Integer argument (arg2) */
1217 sysctl_doprocargs, /* Handler function */
1218 NULL, /* Data pointer */
1219 "");
1220
1221 STATIC int
1222 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1223 {
1224 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1225 int *name = arg1; /* oid element argument vector */
1226 int namelen = arg2; /* number of oid element arguments */
1227 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1228 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1229 // user_addr_t newp = req->newptr; /* user buffer copy in address */
1230 // size_t newlen = req->newlen; /* user buffer copy in size */
1231 int error;
1232
1233 error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 1);
1234
1235 /* adjust index so we return the right required/consumed amount */
1236 if (!error)
1237 req->oldidx += req->oldlen;
1238
1239 return (error);
1240 }
1241 SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
1242 0, /* Pointer argument (arg1) */
1243 0, /* Integer argument (arg2) */
1244 sysctl_doprocargs2, /* Handler function */
1245 NULL, /* Data pointer */
1246 "");
1247
1248 STATIC int
1249 sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
1250 size_t *sizep, proc_t cur_proc, int argc_yes)
1251 {
1252 proc_t p;
1253 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1254 int error = 0;
1255 struct _vm_map *proc_map;
1256 struct task * task;
1257 vm_map_copy_t tmp;
1258 user_addr_t arg_addr;
1259 size_t arg_size;
1260 caddr_t data;
1261 size_t argslen=0;
1262 int size;
1263 vm_offset_t copy_start, copy_end;
1264 kern_return_t ret;
1265 int pid;
1266 kauth_cred_t my_cred;
1267 uid_t uid;
1268
1269 if ( namelen < 1 )
1270 return(EINVAL);
1271
1272 if (argc_yes)
1273 buflen -= sizeof(int); /* reserve first word to return argc */
1274
1275 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1276 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1277 /* is not NULL then the caller wants us to return the length needed to */
1278 /* hold the data we would return */
1279 if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1280 return(EINVAL);
1281 }
1282 arg_size = buflen;
1283
1284 /*
1285 * Lookup process by pid
1286 */
1287 pid = name[0];
1288 p = proc_find(pid);
1289 if (p == NULL) {
1290 return(EINVAL);
1291 }
1292
1293 /*
1294 * Copy the top N bytes of the stack.
1295 * On all machines we have so far, the stack grows
1296 * downwards.
1297 *
1298 * If the user expects no more than N bytes of
1299 * argument list, use that as a guess for the
1300 * size.
1301 */
1302
1303 if (!p->user_stack) {
1304 proc_rele(p);
1305 return(EINVAL);
1306 }
1307
1308 if (where == USER_ADDR_NULL) {
1309 /* caller only wants to know length of proc args data */
1310 if (sizep == NULL) {
1311 proc_rele(p);
1312 return(EFAULT);
1313 }
1314
1315 size = p->p_argslen;
1316 proc_rele(p);
1317 if (argc_yes) {
1318 size += sizeof(int);
1319 }
1320 else {
1321 /*
1322 * old PROCARGS will return the executable's path and plus some
1323 * extra space for work alignment and data tags
1324 */
1325 size += PATH_MAX + (6 * sizeof(int));
1326 }
1327 size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1328 *sizep = size;
1329 return (0);
1330 }
1331
1332 my_cred = kauth_cred_proc_ref(p);
1333 uid = kauth_cred_getuid(my_cred);
1334 kauth_cred_unref(&my_cred);
1335
1336 if ((uid != kauth_cred_getuid(kauth_cred_get()))
1337 && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
1338 proc_rele(p);
1339 return (EINVAL);
1340 }
1341
1342 if ((u_int)arg_size > p->p_argslen)
1343 arg_size = round_page(p->p_argslen);
1344
1345 arg_addr = p->user_stack - arg_size;
1346
1347
1348 /*
1349 * Before we can block (any VM code), make another
1350 * reference to the map to keep it alive. We do
1351 * that by getting a reference on the task itself.
1352 */
1353 task = p->task;
1354 if (task == NULL) {
1355 proc_rele(p);
1356 return(EINVAL);
1357 }
1358
1359 argslen = p->p_argslen;
1360 /*
1361 * Once we have a task reference we can convert that into a
1362 * map reference, which we will use in the calls below. The
1363 * task/process may change its map after we take this reference
1364 * (see execve), but the worst that will happen then is a return
1365 * of stale info (which is always a possibility).
1366 */
1367 task_reference(task);
1368 proc_rele(p);
1369 proc_map = get_task_map_reference(task);
1370 task_deallocate(task);
1371
1372 if (proc_map == NULL)
1373 return(EINVAL);
1374
1375
1376 ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size), VM_KERN_MEMORY_BSD);
1377 if (ret != KERN_SUCCESS) {
1378 vm_map_deallocate(proc_map);
1379 return(ENOMEM);
1380 }
1381
1382 copy_end = round_page(copy_start + arg_size);
1383
1384 if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
1385 (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
1386 vm_map_deallocate(proc_map);
1387 kmem_free(kernel_map, copy_start,
1388 round_page(arg_size));
1389 return (EIO);
1390 }
1391
1392 /*
1393 * Now that we've done the copyin from the process'
1394 * map, we can release the reference to it.
1395 */
1396 vm_map_deallocate(proc_map);
1397
1398 if( vm_map_copy_overwrite(kernel_map,
1399 (vm_map_address_t)copy_start,
1400 tmp, FALSE) != KERN_SUCCESS) {
1401 kmem_free(kernel_map, copy_start,
1402 round_page(arg_size));
1403 return (EIO);
1404 }
1405
1406 if (arg_size > argslen) {
1407 data = (caddr_t) (copy_end - argslen);
1408 size = argslen;
1409 } else {
1410 data = (caddr_t) (copy_end - arg_size);
1411 size = arg_size;
1412 }
1413
1414 /*
1415 * When these sysctls were introduced, the first string in the strings
1416 * section was just the bare path of the executable. However, for security
1417 * reasons we now prefix this string with executable_path= so it can be
1418 * parsed getenv style. To avoid binary compatability issues with exising
1419 * callers of this sysctl, we strip it off here if present.
1420 * (rdar://problem/13746466)
1421 */
1422 #define EXECUTABLE_KEY "executable_path="
1423 if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0){
1424 data += strlen(EXECUTABLE_KEY);
1425 size -= strlen(EXECUTABLE_KEY);
1426 }
1427
1428 if (argc_yes) {
1429 /* Put processes argc as the first word in the copyout buffer */
1430 suword(where, p->p_argc);
1431 error = copyout(data, (where + sizeof(int)), size);
1432 size += sizeof(int);
1433 } else {
1434 error = copyout(data, where, size);
1435
1436 /*
1437 * Make the old PROCARGS work to return the executable's path
1438 * But, only if there is enough space in the provided buffer
1439 *
1440 * on entry: data [possibily] points to the beginning of the path
1441 *
1442 * Note: we keep all pointers&sizes aligned to word boundries
1443 */
1444 if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
1445 {
1446 int binPath_sz, alignedBinPath_sz = 0;
1447 int extraSpaceNeeded, addThis;
1448 user_addr_t placeHere;
1449 char * str = (char *) data;
1450 int max_len = size;
1451
1452 /* Some apps are really bad about messing up their stacks
1453 So, we have to be extra careful about getting the length
1454 of the executing binary. If we encounter an error, we bail.
1455 */
1456
1457 /* Limit ourselves to PATH_MAX paths */
1458 if ( max_len > PATH_MAX ) max_len = PATH_MAX;
1459
1460 binPath_sz = 0;
1461
1462 while ( (binPath_sz < max_len-1) && (*str++ != 0) )
1463 binPath_sz++;
1464
1465 /* If we have a NUL terminator, copy it, too */
1466 if (binPath_sz < max_len-1) binPath_sz += 1;
1467
1468 /* Pre-Flight the space requiremnts */
1469
1470 /* Account for the padding that fills out binPath to the next word */
1471 alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;
1472
1473 placeHere = where + size;
1474
1475 /* Account for the bytes needed to keep placeHere word aligned */
1476 addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;
1477
1478 /* Add up all the space that is needed */
1479 extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
1480
1481 /* is there is room to tack on argv[0]? */
1482 if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
1483 {
1484 placeHere += addThis;
1485 suword(placeHere, 0);
1486 placeHere += sizeof(int);
1487 suword(placeHere, 0xBFFF0000);
1488 placeHere += sizeof(int);
1489 suword(placeHere, 0);
1490 placeHere += sizeof(int);
1491 error = copyout(data, placeHere, binPath_sz);
1492 if ( ! error )
1493 {
1494 placeHere += binPath_sz;
1495 suword(placeHere, 0);
1496 size += extraSpaceNeeded;
1497 }
1498 }
1499 }
1500 }
1501
1502 if (copy_start != (vm_offset_t) 0) {
1503 kmem_free(kernel_map, copy_start, copy_end - copy_start);
1504 }
1505 if (error) {
1506 return(error);
1507 }
1508
1509 if (where != USER_ADDR_NULL)
1510 *sizep = size;
1511 return (0);
1512 }
1513
1514
1515 /*
1516 * Max number of concurrent aio requests
1517 */
1518 STATIC int
1519 sysctl_aiomax
1520 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1521 {
1522 int new_value, changed;
1523 int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
1524 if (changed) {
1525 /* make sure the system-wide limit is greater than the per process limit */
1526 if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS)
1527 aio_max_requests = new_value;
1528 else
1529 error = EINVAL;
1530 }
1531 return(error);
1532 }
1533
1534
1535 /*
1536 * Max number of concurrent aio requests per process
1537 */
1538 STATIC int
1539 sysctl_aioprocmax
1540 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1541 {
1542 int new_value, changed;
1543 int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
1544 if (changed) {
1545 /* make sure per process limit is less than the system-wide limit */
1546 if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
1547 aio_max_requests_per_process = new_value;
1548 else
1549 error = EINVAL;
1550 }
1551 return(error);
1552 }
1553
1554
1555 /*
1556 * Max number of async IO worker threads
1557 */
1558 STATIC int
1559 sysctl_aiothreads
1560 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1561 {
1562 int new_value, changed;
1563 int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
1564 if (changed) {
1565 /* we only allow an increase in the number of worker threads */
1566 if (new_value > aio_worker_threads ) {
1567 _aio_create_worker_threads((new_value - aio_worker_threads));
1568 aio_worker_threads = new_value;
1569 }
1570 else
1571 error = EINVAL;
1572 }
1573 return(error);
1574 }
1575
1576
1577 /*
1578 * System-wide limit on the max number of processes
1579 */
1580 STATIC int
1581 sysctl_maxproc
1582 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1583 {
1584 int new_value, changed;
1585 int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
1586 if (changed) {
1587 AUDIT_ARG(value32, new_value);
1588 /* make sure the system-wide limit is less than the configured hard
1589 limit set at kernel compilation */
1590 if (new_value <= hard_maxproc && new_value > 0)
1591 maxproc = new_value;
1592 else
1593 error = EINVAL;
1594 }
1595 return(error);
1596 }
1597
/* Read-only kernel identification strings and revision */
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		version, 0, "");
SYSCTL_STRING(_kern, OID_AUTO, uuid,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&kernel_uuid_string[0], 0, "");
1613
#if DEBUG
/* Runtime controls for kprintf-based syscall tracing (DEBUG kernels only) */
int debug_kprint_syscall = 0;
char debug_kprint_syscall_process[MAXCOMLEN+1];

/* Thread safe: bits and string value are not used to reclaim state */
SYSCTL_INT (_debug, OID_AUTO, kprint_syscall,
	   CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
	   CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
	   "name of process for kprintf syscall tracing");
1624
1625 int debug_kprint_current_process(const char **namep)
1626 {
1627 struct proc *p = current_proc();
1628
1629 if (p == NULL) {
1630 return 0;
1631 }
1632
1633 if (debug_kprint_syscall_process[0]) {
1634 /* user asked to scope tracing to a particular process name */
1635 if(0 == strncmp(debug_kprint_syscall_process,
1636 p->p_comm, sizeof(debug_kprint_syscall_process))) {
1637 /* no value in telling the user that we traced what they asked */
1638 if(namep) *namep = NULL;
1639
1640 return 1;
1641 } else {
1642 return 0;
1643 }
1644 }
1645
1646 /* trace all processes. Tell user what we traced */
1647 if (namep) {
1648 *namep = p->p_comm;
1649 }
1650
1651 return 1;
1652 }
1653 #endif
1654
1655 /* PR-5293665: need to use a callback function for kern.osversion to set
1656 * osversion in IORegistry */
1657
1658 STATIC int
1659 sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1660 {
1661 int rval = 0;
1662
1663 rval = sysctl_handle_string(oidp, arg1, arg2, req);
1664
1665 if (req->newptr) {
1666 IORegistrySetOSBuildVersion((char *)arg1);
1667 }
1668
1669 return rval;
1670 }
1671
1672 SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
1673 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
1674 osversion, 256 /* OSVERSIZE*/,
1675 sysctl_osversion, "A", "");
1676
1677 STATIC int
1678 sysctl_sysctl_bootargs
1679 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1680 {
1681 int error;
1682 char buf[256];
1683
1684 strlcpy(buf, PE_boot_args(), 256);
1685 error = sysctl_io_string(req, buf, 256, 0, NULL);
1686 return(error);
1687 }
1688
1689 SYSCTL_PROC(_kern, OID_AUTO, bootargs,
1690 CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
1691 NULL, 0,
1692 sysctl_sysctl_bootargs, "A", "bootargs");
1693
/* POSIX limits and live resource counts */
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, 1, "");
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		NULL, 0, "");
#endif
SYSCTL_INT(_kern, OID_AUTO, num_files,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&nfiles, 0, "");
SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&numvnodes, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_tasks,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&task_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_threads,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&thread_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&task_threadmax, 0, "");
1733
1734 STATIC int
1735 sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1736 {
1737 int oldval = desiredvnodes;
1738 int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
1739
1740 if (oldval != desiredvnodes) {
1741 reset_vmobjectcache(oldval, desiredvnodes);
1742 resize_namecache(desiredvnodes);
1743 }
1744
1745 return(error);
1746 }
1747
/* Registration of the handler-backed writable limits defined above */
SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
		CTLFLAG_RW | CTLFLAG_LOCKED,
		&nc_disabled, 0, "");

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_maxvnodes, "I", "");

SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aiothreads, "I", "");

#if (DEVELOPMENT || DEBUG)
extern int sched_smt_balance;
SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance,
		CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
		&sched_smt_balance, 0, "");
#endif
1778
1779 STATIC int
1780 sysctl_securelvl
1781 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1782 {
1783 int new_value, changed;
1784 int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
1785 if (changed) {
1786 if (!(new_value < securelevel && req->p->p_pid != 1)) {
1787 proc_list_lock();
1788 securelevel = new_value;
1789 proc_list_unlock();
1790 } else {
1791 error = EPERM;
1792 }
1793 }
1794 return(error);
1795 }
1796
1797 SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
1798 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1799 0, 0, sysctl_securelvl, "I", "");
1800
1801
1802 STATIC int
1803 sysctl_domainname
1804 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1805 {
1806 int error, changed;
1807 error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
1808 if (changed) {
1809 domainnamelen = strlen(domainname);
1810 }
1811 return(error);
1812 }
1813
1814 SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
1815 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
1816 0, 0, sysctl_domainname, "A", "");
1817
1818 SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
1819 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
1820 &hostid, 0, "");
1821
1822 STATIC int
1823 sysctl_hostname
1824 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1825 {
1826 int error, changed;
1827 error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
1828 if (changed) {
1829 hostnamelen = req->newlen;
1830 }
1831 return(error);
1832 }
1833
1834
1835 SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
1836 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
1837 0, 0, sysctl_hostname, "A", "");
1838
/*
 * kern.procname: read/write the calling process' p_name buffer.
 * Writable by anybody (CTLFLAG_ANYBODY) since it only affects the
 * caller's own proc.
 */
STATIC int
sysctl_procname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	/* Original code allowed writing, I'm copying this, although this all makes
	   no sense to me. Besides, this sysctl is never used. */
	return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
}

SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
		CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
		0, 0, sysctl_procname, "A", "");
1851
/* Speculative I/O, preheat, and VM paging tunables */
SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&speculative_reads_disabled, 0, "");

SYSCTL_INT(_kern, OID_AUTO, ignore_is_ssd,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&ignore_is_ssd, 0, "");

SYSCTL_INT(_kern, OID_AUTO, root_is_CF_drive,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&root_is_CF_drive, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&preheat_max_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&preheat_min_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&speculative_prefetch_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&speculative_prefetch_max_iosize, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_free_target, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_free_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_free_reserved, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_speculative_percentage, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_speculative_q_age_ms, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_max_delayed_work_limit, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_max_batch, 0, "");

SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&bootsessionuuid_string, sizeof(bootsessionuuid_string) , "");
1911
1912 STATIC int
1913 sysctl_boottime
1914 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1915 {
1916 time_t tv_sec = boottime_sec();
1917 struct proc *p = req->p;
1918
1919 if (proc_is64bit(p)) {
1920 struct user64_timeval t;
1921 t.tv_sec = tv_sec;
1922 t.tv_usec = 0;
1923 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
1924 } else {
1925 struct user32_timeval t;
1926 t.tv_sec = tv_sec;
1927 t.tv_usec = 0;
1928 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
1929 }
1930 }
1931
1932 SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
1933 CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
1934 0, 0, sysctl_boottime, "S,timeval", "");
1935
1936 STATIC int
1937 sysctl_symfile
1938 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1939 {
1940 char *str;
1941 int error = get_kernel_symfile(req->p, &str);
1942 if (error)
1943 return (error);
1944 return sysctl_io_string(req, str, 0, 0, NULL);
1945 }
1946
1947
1948 SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
1949 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
1950 0, 0, sysctl_symfile, "A", "");
1951
1952 #if NFSCLIENT
/*
 * kern.netboot: report whether the root filesystem is network-mounted,
 * as the (non-zero when netbooted) value returned by netboot_root().
 */
STATIC int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
}
1959
1960 SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
1961 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
1962 0, 0, sysctl_netboot, "I", "");
1963 #endif
1964
1965 #ifdef CONFIG_IMGSRC_ACCESS
1966 /*
1967 * Legacy--act as if only one layer of nesting is possible.
1968 */
/*
 * kern.imgsrcdev: report the dev_t of the device backing the imageboot
 * source root volume (imgsrc_rootvnodes[0] only -- legacy single-level
 * view).  Superuser only.
 */
STATIC int
sysctl_imgsrcdev
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	vfs_context_t ctx = vfs_context_current();
	vnode_t devvp;
	int result;

	if (!vfs_context_issuser(ctx)) {
		return EPERM;
	}

	if (imgsrc_rootvnodes[0] == NULL) {
		return ENOENT;
	}

	/* Take an iocount on the root vnode so it cannot be recycled under us. */
	result = vnode_getwithref(imgsrc_rootvnodes[0]);
	if (result != 0) {
		return result;
	}

	/* Also hold the backing device vnode while we read its rdev. */
	devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
	result = vnode_getwithref(devvp);
	if (result != 0) {
		goto out;
	}

	result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);

	vnode_put(devvp);
out:
	vnode_put(imgsrc_rootvnodes[0]);
	return result;
}
2003
2004 SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
2005 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2006 0, 0, sysctl_imgsrcdev, "I", "");
2007
/*
 * kern.imgsrcinfo: export one imgsrc_info record (backing dev_t, flags,
 * nesting height) per populated level of imageboot nesting.  Output is
 * sized to the number of levels actually found.
 */
STATIC int
sysctl_imgsrcinfo
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct imgsrc_info info[MAX_IMAGEBOOT_NESTING];	/* 2 for now, no problem */
	uint32_t i;
	vnode_t rvp, devvp;

	if (imgsrc_rootvnodes[0] == NULLVP) {
		return ENXIO;
	}

	for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
		/*
		 * Go get the root vnode.
		 */
		rvp = imgsrc_rootvnodes[i];
		if (rvp == NULLVP) {
			break;
		}

		error = vnode_get(rvp);
		if (error != 0) {
			return error;
		}

		/*
		 * For now, no getting at a non-local volume.
		 */
		devvp = vnode_mount(rvp)->mnt_devvp;
		if (devvp == NULL) {
			vnode_put(rvp);
			return EINVAL;
		}

		error = vnode_getwithref(devvp);
		if (error != 0) {
			vnode_put(rvp);
			return error;
		}

		/*
		 * Fill in info.
		 */
		info[i].ii_dev = vnode_specrdev(devvp);
		info[i].ii_flags = 0;
		info[i].ii_height = i;
		bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));

		vnode_put(devvp);
		vnode_put(rvp);
	}

	/* i is the count of levels filled in above. */
	return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
}
2064
2065 SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
2066 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
2067 0, 0, sysctl_imgsrcinfo, "I", "");
2068
2069 #endif /* CONFIG_IMGSRC_ACCESS */
2070
2071
2072 SYSCTL_DECL(_kern_timer);
2073 SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");
2074
2075
2076 SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
2077 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2078 &mach_timer_coalescing_enabled, 0, "");
2079
2080 SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
2081 CTLFLAG_RW | CTLFLAG_LOCKED,
2082 &timer_deadline_tracking_bin_1, "");
2083 SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
2084 CTLFLAG_RW | CTLFLAG_LOCKED,
2085 &timer_deadline_tracking_bin_2, "");
2086
2087 SYSCTL_DECL(_kern_timer_longterm);
2088 SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");
2089
2090
2091 /* Must match definition in osfmk/kern/timer_call.c */
2092 enum {
2093 THRESHOLD, QCOUNT,
2094 ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
2095 LATENCY, LATENCY_MIN, LATENCY_MAX
2096 };
2097 extern uint64_t timer_sysctl_get(int);
2098 extern int timer_sysctl_set(int, uint64_t);
2099
2100 STATIC int
2101 sysctl_timer
2102 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2103 {
2104 int oid = (int)arg1;
2105 uint64_t value = timer_sysctl_get(oid);
2106 uint64_t new_value;
2107 int error;
2108 int changed;
2109
2110 error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
2111 if (changed)
2112 error = timer_sysctl_set(oid, new_value);
2113
2114 return error;
2115 }
2116
2117 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
2118 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
2119 (void *) THRESHOLD, 0, sysctl_timer, "Q", "");
2120 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
2121 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2122 (void *) QCOUNT, 0, sysctl_timer, "Q", "");
2123 #if DEBUG
2124 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
2125 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2126 (void *) ENQUEUES, 0, sysctl_timer, "Q", "");
2127 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
2128 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2129 (void *) DEQUEUES, 0, sysctl_timer, "Q", "");
2130 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
2131 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2132 (void *) ESCALATES, 0, sysctl_timer, "Q", "");
2133 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
2134 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2135 (void *) SCANS, 0, sysctl_timer, "Q", "");
2136 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
2137 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2138 (void *) PREEMPTS, 0, sysctl_timer, "Q", "");
2139 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
2140 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2141 (void *) LATENCY, 0, sysctl_timer, "Q", "");
2142 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
2143 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2144 (void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
2145 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
2146 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2147 (void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
2148 #endif /* DEBUG */
2149
/*
 * kern.usrstack: legacy 32-bit view of the calling process's user stack
 * address.  The truncating (int) cast is deliberate; 64-bit callers
 * should use kern.usrstack64 instead.
 */
STATIC int
sysctl_usrstack
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
}
2156
2157 SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
2158 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2159 0, 0, sysctl_usrstack, "I", "");
2160
/*
 * kern.usrstack64: report the calling process's user stack address at
 * full width (no truncation, unlike kern.usrstack).
 */
STATIC int
sysctl_usrstack64
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
}
2167
2168 SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
2169 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2170 0, 0, sysctl_usrstack64, "Q", "");
2171
2172 SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
2173 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2174 corefilename, sizeof(corefilename), "");
2175
2176 STATIC int
2177 sysctl_coredump
2178 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2179 {
2180 #ifdef SECURE_KERNEL
2181 (void)req;
2182 return (ENOTSUP);
2183 #else
2184 int new_value, changed;
2185 int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
2186 if (changed) {
2187 if ((new_value == 0) || (new_value == 1))
2188 do_coredump = new_value;
2189 else
2190 error = EINVAL;
2191 }
2192 return(error);
2193 #endif
2194 }
2195
2196 SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
2197 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2198 0, 0, sysctl_coredump, "I", "");
2199
2200 STATIC int
2201 sysctl_suid_coredump
2202 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2203 {
2204 #ifdef SECURE_KERNEL
2205 (void)req;
2206 return (ENOTSUP);
2207 #else
2208 int new_value, changed;
2209 int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
2210 if (changed) {
2211 if ((new_value == 0) || (new_value == 1))
2212 sugid_coredump = new_value;
2213 else
2214 error = EINVAL;
2215 }
2216 return(error);
2217 #endif
2218 }
2219
2220 SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
2221 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2222 0, 0, sysctl_suid_coredump, "I", "");
2223
2224 STATIC int
2225 sysctl_delayterm
2226 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2227 {
2228 struct proc *p = req->p;
2229 int new_value, changed;
2230 int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
2231 if (changed) {
2232 proc_lock(p);
2233 if (new_value)
2234 req->p->p_lflag |= P_LDELAYTERM;
2235 else
2236 req->p->p_lflag &= ~P_LDELAYTERM;
2237 proc_unlock(p);
2238 }
2239 return(error);
2240 }
2241
2242 SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
2243 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2244 0, 0, sysctl_delayterm, "I", "");
2245
2246
2247 STATIC int
2248 sysctl_rage_vnode
2249 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2250 {
2251 struct proc *p = req->p;
2252 struct uthread *ut;
2253 int new_value, old_value, changed;
2254 int error;
2255
2256 ut = get_bsdthread_info(current_thread());
2257
2258 if (ut->uu_flag & UT_RAGE_VNODES)
2259 old_value = KERN_RAGE_THREAD;
2260 else if (p->p_lflag & P_LRAGE_VNODES)
2261 old_value = KERN_RAGE_PROC;
2262 else
2263 old_value = 0;
2264
2265 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2266
2267 if (error == 0) {
2268 switch (new_value) {
2269 case KERN_RAGE_PROC:
2270 proc_lock(p);
2271 p->p_lflag |= P_LRAGE_VNODES;
2272 proc_unlock(p);
2273 break;
2274 case KERN_UNRAGE_PROC:
2275 proc_lock(p);
2276 p->p_lflag &= ~P_LRAGE_VNODES;
2277 proc_unlock(p);
2278 break;
2279
2280 case KERN_RAGE_THREAD:
2281 ut->uu_flag |= UT_RAGE_VNODES;
2282 break;
2283 case KERN_UNRAGE_THREAD:
2284 ut = get_bsdthread_info(current_thread());
2285 ut->uu_flag &= ~UT_RAGE_VNODES;
2286 break;
2287 }
2288 }
2289 return(error);
2290 }
2291
2292 SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
2293 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2294 0, 0, sysctl_rage_vnode, "I", "");
2295
2296 /* XXX move this interface into libproc and remove this sysctl */
/*
 * kern.setthread_cpupercent: apply a CPU usage limit to the calling
 * thread via thread_set_cpulimit().  The written int packs:
 *   bits 0-7  : percentage of a core (0 clears any existing limit)
 *   bits 8-31 : refill interval in milliseconds
 * Write-only: a read returns immediately with no data.
 */
STATIC int
sysctl_setthread_cpupercent
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, old_value;
	int error = 0;
	kern_return_t kret = KERN_SUCCESS;
	uint8_t percent = 0;
	int ms_refill = 0;

	if (!req->newptr)
		return (0);

	old_value = 0;

	if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0)
		return (error);

	percent = new_value & 0xff;			/* low 8 bits for percent */
	ms_refill = (new_value >> 8) & 0xffffff;	/* upper 24 bits represent ms refill value */
	if (percent > 100)
		return (EINVAL);

	/*
	 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
	 */
	if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0)
		return (EIO);

	return (0);
}
2328
2329 SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
2330 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
2331 0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
2332
2333
2334 STATIC int
2335 sysctl_kern_check_openevt
2336 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2337 {
2338 struct proc *p = req->p;
2339 int new_value, old_value, changed;
2340 int error;
2341
2342 if (p->p_flag & P_CHECKOPENEVT) {
2343 old_value = KERN_OPENEVT_PROC;
2344 } else {
2345 old_value = 0;
2346 }
2347
2348 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2349
2350 if (error == 0) {
2351 switch (new_value) {
2352 case KERN_OPENEVT_PROC:
2353 OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
2354 break;
2355
2356 case KERN_UNOPENEVT_PROC:
2357 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
2358 break;
2359
2360 default:
2361 error = EINVAL;
2362 }
2363 }
2364 return(error);
2365 }
2366
2367 SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2368 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2369
2370
2371
/*
 * kern.nx: get/set whether no-execute (NX/XD) page protection is
 * enforced.  Not supported on SECURE_KERNEL builds; on x86 the value
 * may only be changed when the CPU advertises the XD feature bit.
 */
STATIC int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	(void)req;
	return ENOTSUP;
#else
	int new_value, changed;
	int error;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
	if (error)
		return error;

	if (changed) {
#if defined(__i386__) || defined(__x86_64__)
		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
			return ENOTSUP;
#endif
		nx_enabled = new_value;
	}
	return(error);
#endif /* SECURE_KERNEL */
}
2400
2401
2402
2403 SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
2404 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2405 0, 0, sysctl_nx, "I", "");
2406
2407 STATIC int
2408 sysctl_loadavg
2409 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2410 {
2411 if (proc_is64bit(req->p)) {
2412 struct user64_loadavg loadinfo64;
2413 fill_loadavg64(&averunnable, &loadinfo64);
2414 return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
2415 } else {
2416 struct user32_loadavg loadinfo32;
2417 fill_loadavg32(&averunnable, &loadinfo32);
2418 return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
2419 }
2420 }
2421
2422 SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
2423 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2424 0, 0, sysctl_loadavg, "S,loadavg", "");
2425
2426 /*
2427 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2428 */
2429 STATIC int
2430 sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
2431 __unused int arg2, struct sysctl_req *req)
2432 {
2433 int old_value=0, new_value=0, error=0;
2434
2435 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value ))
2436 return(error);
2437 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
2438 if (!error) {
2439 return (vm_toggle_entry_reuse(new_value, NULL));
2440 }
2441 return(error);
2442 }
2443
2444 SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse,"I","");
2445
2446 STATIC int
2447 sysctl_swapusage
2448 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2449 {
2450 int error;
2451 uint64_t swap_total;
2452 uint64_t swap_avail;
2453 vm_size_t swap_pagesize;
2454 boolean_t swap_encrypted;
2455 struct xsw_usage xsu;
2456
2457 error = macx_swapinfo(&swap_total,
2458 &swap_avail,
2459 &swap_pagesize,
2460 &swap_encrypted);
2461 if (error)
2462 return error;
2463
2464 xsu.xsu_total = swap_total;
2465 xsu.xsu_avail = swap_avail;
2466 xsu.xsu_used = swap_total - swap_avail;
2467 xsu.xsu_pagesize = swap_pagesize;
2468 xsu.xsu_encrypted = swap_encrypted;
2469 return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
2470 }
2471
2472
2473
2474 SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
2475 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2476 0, 0, sysctl_swapusage, "S,xsw_usage", "");
2477
2478 #if CONFIG_FREEZE
2479 extern void vm_page_reactivate_all_throttled(void);
2480
/*
 * vm.freeze_enabled: enable/disable the memorystatus freezer.  Changing
 * it is refused when a compressed pager is active.  When the freezer is
 * being turned off, dirty pages parked on the throttled queue are moved
 * back to the active queue so they age normally again.
 */
static int
sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = memorystatus_freeze_enabled ? 1 : 0;
	boolean_t disabled;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
		//assert(req->newptr);
		printf("Failed attempt to set vm.freeze_enabled sysctl\n");
		return EINVAL;
	}

	/*
	 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
	 */
	disabled = (!val && memorystatus_freeze_enabled);

	memorystatus_freeze_enabled = val ? TRUE : FALSE;

	if (disabled) {
		vm_page_reactivate_all_throttled();
	}

	return (0);
}
2511
2512 SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
2513 #endif /* CONFIG_FREEZE */
2514
2515 /* this kernel does NOT implement shared_region_make_private_np() */
2516 SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
2517 CTLFLAG_RD | CTLFLAG_LOCKED,
2518 (int *)NULL, 0, "");
2519
/*
 * Resolve the CPU type of a target process.
 *
 * name/namelen are the trailing sysctl OID components: with none, the
 * current process is used; with exactly one, it is treated as a pid to
 * look up (the reference taken by proc_find() is dropped before
 * returning).  Any other component count, or an unknown pid, yields
 * EINVAL.
 *
 * On success, *cputype is the machine's cpu_type() (arch bits masked
 * off) with CPU_ARCH_ABI64 ORed in when the target is 64-bit.
 */
STATIC int
fetch_process_cputype(
	proc_t cur_proc,
	int *name,
	u_int namelen,
	cpu_type_t *cputype)
{
	proc_t p = PROC_NULL;
	int refheld = 0;
	cpu_type_t ret = 0;
	int error = 0;

	if (namelen == 0)
		p = cur_proc;
	else if (namelen == 1) {
		p = proc_find(name[0]);
		if (p == NULL)
			return (EINVAL);
		refheld = 1;
	} else {
		error = EINVAL;
		goto out;
	}

	ret = cpu_type() & ~CPU_ARCH_MASK;
	if (IS_64BIT_PROCESS(p))
		ret |= CPU_ARCH_ABI64;

	*cputype = ret;

	/* Drop the reference taken by proc_find(), if any. */
	if (refheld != 0)
		proc_rele(p);
out:
	return (error);
}
2555
2556 STATIC int
2557 sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2558 struct sysctl_req *req)
2559 {
2560 int error;
2561 cpu_type_t proc_cputype = 0;
2562 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2563 return error;
2564 int res = 1;
2565 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
2566 res = 0;
2567 return SYSCTL_OUT(req, &res, sizeof(res));
2568 }
2569 SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native ,"I","proc_native");
2570
2571 STATIC int
2572 sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2573 struct sysctl_req *req)
2574 {
2575 int error;
2576 cpu_type_t proc_cputype = 0;
2577 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2578 return error;
2579 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
2580 }
2581 SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
2582
/*
 * kern.safeboot: 1 if the system was booted in safe mode (RB_SAFEBOOT
 * set in boothowto), else 0.
 */
STATIC int
sysctl_safeboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
}
2589
2590 SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
2591 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2592 0, 0, sysctl_safeboot, "I", "");
2593
/*
 * kern.singleuser: 1 if the system was booted single-user (RB_SINGLE
 * set in boothowto), else 0.
 */
STATIC int
sysctl_singleuser
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
}
2600
2601 SYSCTL_PROC(_kern, OID_AUTO, singleuser,
2602 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2603 0, 0, sysctl_singleuser, "I", "");
2604
/*
 * kern.minimalboot: report the minimalboot flag as an int.
 */
STATIC int sysctl_minimalboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, minimalboot, sizeof(int), NULL, NULL);
}
2610
2611 SYSCTL_PROC(_kern, OID_AUTO, minimalboot,
2612 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2613 0, 0, sysctl_minimalboot, "I", "");
2614
2615 /*
2616 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
2617 */
2618 extern boolean_t affinity_sets_enabled;
2619 extern int affinity_sets_mapping;
2620
2621 SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
2622 CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
2623 SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
2624 CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
2625
2626 /*
2627 * Boolean indicating if KASLR is active.
2628 */
/*
 * kern.slide: 0/1 flag for whether the kernel was slid at boot
 * (vm_kernel_slide != 0), i.e. whether KASLR is active.  Only the
 * boolean is disclosed, never the slide amount itself.
 */
STATIC int
sysctl_slide
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	uint32_t slide;

	slide = vm_kernel_slide ? 1 : 0;

	return sysctl_io_number( req, slide, sizeof(int), NULL, NULL);
}
2639
2640 SYSCTL_PROC(_kern, OID_AUTO, slide,
2641 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2642 0, 0, sysctl_slide, "I", "");
2643
2644 /*
2645 * Limit on total memory users can wire.
2646 *
2647 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
2648 *
2649 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
2650 *
2651 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
2652 * kmem_init().
2653 *
2654 * All values are in bytes.
2655 */
2656
2657 vm_map_size_t vm_global_no_user_wire_amount;
2658 vm_map_size_t vm_global_user_wire_limit;
2659 vm_map_size_t vm_user_wire_limit;
2660
2661 /*
2662 * There needs to be a more automatic/elegant way to do this
2663 */
2664 SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
2665 SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
2666 SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
2667
2668 extern int vm_map_copy_overwrite_aligned_src_not_internal;
2669 extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
2670 extern int vm_map_copy_overwrite_aligned_src_large;
2671 SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
2672 SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
2673 SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");
2674
2675
2676 extern uint32_t vm_page_external_count;
2677 extern uint32_t vm_page_filecache_min;
2678
2679 SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");
2680 SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_filecache_min, 0, "");
2681
2682 extern int vm_compressor_mode;
2683 extern int vm_compressor_is_active;
2684 extern int vm_compressor_available;
2685 extern uint32_t vm_ripe_target_age;
2686 extern uint32_t swapout_target_age;
2687 extern int64_t compressor_bytes_used;
2688 extern int64_t c_segment_input_bytes;
2689 extern int64_t c_segment_compressed_bytes;
2690 extern uint32_t compressor_eval_period_in_msecs;
2691 extern uint32_t compressor_sample_min_in_msecs;
2692 extern uint32_t compressor_sample_max_in_msecs;
2693 extern uint32_t compressor_thrashing_threshold_per_10msecs;
2694 extern uint32_t compressor_thrashing_min_per_10msecs;
2695 extern uint32_t vm_compressor_minorcompact_threshold_divisor;
2696 extern uint32_t vm_compressor_majorcompact_threshold_divisor;
2697 extern uint32_t vm_compressor_unthrottle_threshold_divisor;
2698 extern uint32_t vm_compressor_catchup_threshold_divisor;
2699
2700 SYSCTL_QUAD(_vm, OID_AUTO, compressor_input_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_input_bytes, "");
2701 SYSCTL_QUAD(_vm, OID_AUTO, compressor_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_compressed_bytes, "");
2702 SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");
2703
2704 SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
2705 SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
2706 SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
2707 SYSCTL_INT(_vm, OID_AUTO, compressor_available, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_available, 0, "");
2708
2709 SYSCTL_INT(_vm, OID_AUTO, vm_ripe_target_age_in_secs, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ripe_target_age, 0, "");
2710
2711 SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
2712 SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
2713 SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
2714 SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
2715 SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");
2716 SYSCTL_INT(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_minorcompact_threshold_divisor, 0, "");
2717 SYSCTL_INT(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_majorcompact_threshold_divisor, 0, "");
2718 SYSCTL_INT(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_unthrottle_threshold_divisor, 0, "");
2719 SYSCTL_INT(_vm, OID_AUTO, compressor_catchup_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_catchup_threshold_divisor, 0, "");
2720
2721 SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");
2722
2723 #if CONFIG_PHANTOM_CACHE
2724 extern uint32_t phantom_cache_thrashing_threshold;
2725 extern uint32_t phantom_cache_eval_period_in_msecs;
2726 extern uint32_t phantom_cache_thrashing_threshold_ssd;
2727
2728
2729 SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
2730 SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
2731 SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
2732 #endif
2733
2734 #if (DEVELOPMENT || DEBUG)
2735
2736 SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
2737 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2738 &vm_page_creation_throttled_hard, 0, "");
2739
2740 SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
2741 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2742 &vm_page_creation_throttled_soft, 0, "");
2743
2744 #endif /* DEVELOPMENT || DEBUG */
2745
2746 /*
2747 * Enable tracing of voucher contents
2748 */
2749 extern uint32_t ipc_voucher_trace_contents;
2750
2751 SYSCTL_INT (_kern, OID_AUTO, ipc_voucher_trace_contents,
2752 CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");
2753
2754 /*
2755 * Kernel stack size and depth
2756 */
2757 SYSCTL_INT (_kern, OID_AUTO, stack_size,
2758 CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
2759 SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
2760 CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
2761
2762 /*
2763 * enable back trace for port allocations
2764 */
2765 extern int ipc_portbt;
2766
2767 SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
2768 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2769 &ipc_portbt, 0, "");
2770
2771 /*
2772 * Scheduler sysctls
2773 */
2774
2775 SYSCTL_STRING(_kern, OID_AUTO, sched,
2776 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2777 sched_string, sizeof(sched_string),
2778 "Timeshare scheduler implementation");
2779
2780 /*
2781 * Only support runtime modification on embedded platforms
2782 * with development config enabled
2783 */
2784
2785
2786 /* Parameters related to timer coalescing tuning, to be replaced
2787 * with a dedicated systemcall in the future.
2788 */
2789 /* Enable processing pending timers in the context of any other interrupt
2790 * Coalescing tuning parameters for various thread/task attributes */
/*
 * Shared handler for the kern.timer_coalesce_*_ns_max controls.
 * arg1 points at a kernel variable holding a Mach absolute-time value;
 * arg2 is that variable's size (4 or 8 bytes).  Userspace reads and
 * writes the value in nanoseconds; this handler converts between
 * nanoseconds and abstime on the way through.
 */
STATIC int
sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	int size = arg2;	/* size in bytes of the variable at arg1 */
	int error;
	int changed = 0;
	uint64_t old_value_ns;
	uint64_t new_value_ns;
	uint64_t value_abstime;
	if (size == sizeof(uint32_t))
		value_abstime = *((uint32_t *)arg1);
	else if (size == sizeof(uint64_t))
		value_abstime = *((uint64_t *)arg1);
	else return ENOTSUP;

	/* Expose the current value to userspace in nanoseconds. */
	absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
	error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
	if ((error) || (!changed))
		return error;

	/* Store the new value back in abstime, truncated for a 32-bit target. */
	nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
	if (size == sizeof(uint32_t))
		*((uint32_t *)arg1) = (uint32_t)value_abstime;
	else
		*((uint64_t *)arg1) = value_abstime;
	return error;
}
2819
2820 SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
2821 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2822 &tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
2823 SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
2824 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2825 &tcoal_prio_params.timer_resort_threshold_abstime,
2826 sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
2827 sysctl_timer_user_us_kernel_abstime,
2828 "Q", "");
2829 SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
2830 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2831 &tcoal_prio_params.timer_coalesce_bg_abstime_max,
2832 sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
2833 sysctl_timer_user_us_kernel_abstime,
2834 "Q", "");
2835
2836 SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
2837 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2838 &tcoal_prio_params.timer_coalesce_kt_shift, 0, "");
2839
2840 SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
2841 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2842 &tcoal_prio_params.timer_coalesce_kt_abstime_max,
2843 sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
2844 sysctl_timer_user_us_kernel_abstime,
2845 "Q", "");
2846
2847 SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
2848 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2849 &tcoal_prio_params.timer_coalesce_fp_shift, 0, "");
2850
2851 SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
2852 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2853 &tcoal_prio_params.timer_coalesce_fp_abstime_max,
2854 sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
2855 sysctl_timer_user_us_kernel_abstime,
2856 "Q", "");
2857
/*
 * kern.timer_coalesce_ts_scale / kern.timer_coalesce_ts_ns_max:
 * read/write coalescing shift and abstime cap for "ts" timers, backed by
 * tcoal_prio_params.timer_coalesce_ts_shift and
 * tcoal_prio_params.timer_coalesce_ts_abstime_max. Same ns<->abstime
 * proc-handler arrangement as the other *_ns_max OIDs in this group.
 */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&tcoal_prio_params.timer_coalesce_ts_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&tcoal_prio_params.timer_coalesce_ts_abstime_max,
sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
sysctl_timer_user_us_kernel_abstime,
"Q", "");
2868
/*
 * Per-tier latency-QoS coalescing controls, tiers 0 through 5.
 * For each tier N:
 *   kern.timer_coalesce_tierN_scale  — read/write int backed by
 *     tcoal_prio_params.latency_qos_scale[N].
 *   kern.timer_coalesce_tierN_ns_max — read/write 64-bit value backed by
 *     tcoal_prio_params.latency_qos_abstime_max[N], exposed through
 *     sysctl_timer_user_us_kernel_abstime (presumably ns<->abstime
 *     conversion — NOTE(review): inferred from naming; confirm in the
 *     handler implementation).
 */

/* Tier 0 */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&tcoal_prio_params.latency_qos_scale[0], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&tcoal_prio_params.latency_qos_abstime_max[0],
sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
sysctl_timer_user_us_kernel_abstime,
"Q", "");

/* Tier 1 */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&tcoal_prio_params.latency_qos_scale[1], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&tcoal_prio_params.latency_qos_abstime_max[1],
sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
sysctl_timer_user_us_kernel_abstime,
"Q", "");

/* Tier 2 */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&tcoal_prio_params.latency_qos_scale[2], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&tcoal_prio_params.latency_qos_abstime_max[2],
sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
sysctl_timer_user_us_kernel_abstime,
"Q", "");

/* Tier 3 */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&tcoal_prio_params.latency_qos_scale[3], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&tcoal_prio_params.latency_qos_abstime_max[3],
sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
sysctl_timer_user_us_kernel_abstime,
"Q", "");

/* Tier 4 */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&tcoal_prio_params.latency_qos_scale[4], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&tcoal_prio_params.latency_qos_abstime_max[4],
sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
sysctl_timer_user_us_kernel_abstime,
"Q", "");

/* Tier 5 */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&tcoal_prio_params.latency_qos_scale[5], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&tcoal_prio_params.latency_qos_abstime_max[5],
sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
sysctl_timer_user_us_kernel_abstime,
"Q", "");
2934
2935 /* Communicate the "user idle level" heuristic to the timer layer, and
2936 * potentially other layers in the future.
2937 */
2938
2939 static int
2940 timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
2941 int new_value = 0, old_value = 0, changed = 0, error;
2942
2943 old_value = timer_get_user_idle_level();
2944
2945 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2946
2947 if (error == 0 && changed) {
2948 if (timer_set_user_idle_level(new_value) != KERN_SUCCESS)
2949 error = ERANGE;
2950 }
2951
2952 return error;
2953 }
2954
/*
 * machdep.user_idle_level: read/write int OID serviced by the
 * timer_user_idle_level handler defined in this file; the description
 * string documents the expected range as 0-128.
 */
SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level,
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
0, 0,
timer_user_idle_level, "I", "User idle level heuristic, 0-128");
2959
#if HYPERVISOR
/*
 * kern.hv_support: read-only int exposing hv_support_available.
 * Compiled in only when HYPERVISOR is enabled.
 */
SYSCTL_INT(_kern, OID_AUTO, hv_support,
CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
&hv_support_available, 0, "");
#endif
2965
2966