]> git.saurik.com Git - apple/xnu.git/blame_incremental - bsd/kern/kern_sysctl.c
xnu-2782.30.5.tar.gz
[apple/xnu.git] / bsd / kern / kern_sysctl.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
65 */
66/*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections. This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
72
73/*
74 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
84 */
85
86#include <sys/param.h>
87#include <sys/systm.h>
88#include <sys/kernel.h>
89#include <sys/malloc.h>
90#include <sys/proc_internal.h>
91#include <sys/kauth.h>
92#include <sys/file_internal.h>
93#include <sys/vnode_internal.h>
94#include <sys/unistd.h>
95#include <sys/buf.h>
96#include <sys/ioctl.h>
97#include <sys/namei.h>
98#include <sys/tty.h>
99#include <sys/disklabel.h>
100#include <sys/vm.h>
101#include <sys/sysctl.h>
102#include <sys/user.h>
103#include <sys/aio_kern.h>
104#include <sys/reboot.h>
105
106#include <security/audit/audit.h>
107#include <kern/kalloc.h>
108
109#include <mach/machine.h>
110#include <mach/mach_host.h>
111#include <mach/mach_types.h>
112#include <mach/vm_param.h>
113#include <kern/mach_param.h>
114#include <kern/task.h>
115#include <kern/thread.h>
116#include <kern/processor.h>
117#include <kern/debug.h>
118#include <vm/vm_kern.h>
119#include <vm/vm_map.h>
120#include <mach/host_info.h>
121
122#include <sys/mount_internal.h>
123#include <sys/kdebug.h>
124
125#include <IOKit/IOPlatformExpert.h>
126#include <pexpert/pexpert.h>
127
128#include <machine/machine_routines.h>
129#include <machine/exec.h>
130
131#include <vm/vm_protos.h>
132#include <vm/vm_pageout.h>
133#include <sys/imgsrc.h>
134#include <kern/timer_call.h>
135
136#if defined(__i386__) || defined(__x86_64__)
137#include <i386/cpuid.h>
138#endif
139
140#if CONFIG_FREEZE
141#include <sys/kern_memorystatus.h>
142#endif
143
144#if KPERF
145#include <kperf/kperf.h>
146#endif
147
148#if HYPERVISOR
149#include <kern/hv_support.h>
150#endif
151
152/*
153 * deliberately setting max requests to really high number
154 * so that runaway settings do not cause MALLOC overflows
155 */
156#define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
157
158extern int aio_max_requests;
159extern int aio_max_requests_per_process;
160extern int aio_worker_threads;
161extern int lowpri_IO_window_msecs;
162extern int lowpri_IO_delay_msecs;
163extern int nx_enabled;
164extern int speculative_reads_disabled;
165extern int ignore_is_ssd;
166extern unsigned int speculative_prefetch_max;
167extern unsigned int speculative_prefetch_max_iosize;
168extern unsigned int preheat_max_bytes;
169extern unsigned int preheat_min_bytes;
170extern long numvnodes;
171
172extern uuid_string_t bootsessionuuid_string;
173
174extern unsigned int vm_max_delayed_work_limit;
175extern unsigned int vm_max_batch;
176
177extern unsigned int vm_page_free_min;
178extern unsigned int vm_page_free_target;
179extern unsigned int vm_page_free_reserved;
180extern unsigned int vm_page_speculative_percentage;
181extern unsigned int vm_page_speculative_q_age_ms;
182
183#if (DEVELOPMENT || DEBUG)
184extern uint32_t vm_page_creation_throttled_hard;
185extern uint32_t vm_page_creation_throttled_soft;
186#endif /* DEVELOPMENT || DEBUG */
187
188/*
189 * Conditionally allow dtrace to see these functions for debugging purposes.
190 */
191#ifdef STATIC
192#undef STATIC
193#endif
194#if 0
195#define STATIC
196#else
197#define STATIC static
198#endif
199
200extern boolean_t mach_timer_coalescing_enabled;
201
202extern uint64_t timer_deadline_tracking_bin_1, timer_deadline_tracking_bin_2;
203
204STATIC void
205fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
206STATIC void
207fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
208STATIC void
209fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
210STATIC void
211fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
212STATIC void
213fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
214STATIC void
215fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);
216
217extern int
218kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
219#if NFSCLIENT
220extern int
221netboot_root(void);
222#endif
223int
224pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
225 proc_t p);
226__private_extern__ kern_return_t
227reset_vmobjectcache(unsigned int val1, unsigned int val2);
228int
229sysctl_procargs(int *name, u_int namelen, user_addr_t where,
230 size_t *sizep, proc_t cur_proc);
231STATIC int
232sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
233 proc_t cur_proc, int argc_yes);
234int
235sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
236 size_t newlen, void *sp, int len);
237
238STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
239STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
240STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
241STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
242STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
243#if CONFIG_LCTX
244STATIC int sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg);
245#endif
246int sysdoproc_callback(proc_t p, void *arg);
247
248
249/* forward declarations for non-static STATIC */
250STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
251STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
252STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
253STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
254STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
255STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
256#if COUNT_SYSCALLS
257STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
258#endif /* COUNT_SYSCALLS */
259STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
260STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
261STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
262STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
263STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
264STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
265STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
266STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
267STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
268STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
269STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
270STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
271STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
272STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
273STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
274STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
275#if NFSCLIENT
276STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
277#endif
278#ifdef CONFIG_IMGSRC_ACCESS
279STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
280#endif
281STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
282STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
283STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
284STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
285STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
286STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
287STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
288STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
289STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
290STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
291STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
292STATIC int fetch_process_cputype( proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
293STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
294STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
295STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
296STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
297STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
298
299
300extern void IORegistrySetOSBuildVersion(char * build_version);
301
302STATIC void
303fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
304{
305 la64->ldavg[0] = la->ldavg[0];
306 la64->ldavg[1] = la->ldavg[1];
307 la64->ldavg[2] = la->ldavg[2];
308 la64->fscale = (user64_long_t)la->fscale;
309}
310
311STATIC void
312fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
313{
314 la32->ldavg[0] = la->ldavg[0];
315 la32->ldavg[1] = la->ldavg[1];
316 la32->ldavg[2] = la->ldavg[2];
317 la32->fscale = (user32_long_t)la->fscale;
318}
319
320/*
321 * Attributes stored in the kernel.
322 */
323extern char corefilename[MAXPATHLEN+1];
324extern int do_coredump;
325extern int sugid_coredump;
326
327#if COUNT_SYSCALLS
328extern int do_count_syscalls;
329#endif
330
331#ifdef INSECURE
332int securelevel = -1;
333#else
334int securelevel;
335#endif
336
/*
 * kern.threadname handler: get or set the calling thread's name.
 *
 * Read side: reports the current name length (strlen-style, without the
 * NUL terminator) in req->oldidx, and copies the name out if a buffer
 * was supplied.  When no name has been set yet, the advertised length
 * is MAXTHREADNAMESIZE - 1 so callers size their buffer generously.
 *
 * Write side: lazily allocates the per-uthread name buffer on first
 * use, zeroes it, and copies in at most MAXTHREADNAMESIZE - 1 bytes
 * (the bzero guarantees NUL termination).
 *
 * Returns 0 on success, or ENOMEM / ENAMETOOLONG / a copyin/copyout
 * error.
 */
STATIC int
sysctl_handle_kern_threadname(	__unused struct sysctl_oid *oidp, __unused void *arg1,
	 __unused int arg2, struct sysctl_req *req)
{
	int error;
	/* name storage lives on the current uthread, not the proc */
	struct uthread *ut = get_bsdthread_info(current_thread());
	user_addr_t oldp=0, newp=0;
	size_t *oldlenp=NULL;
	size_t newlen=0;

	oldp = req->oldptr;
	oldlenp = &(req->oldlen);
	newp = req->newptr;
	newlen = req->newlen;

	/* We want the current length, and maybe the string itself */
	if(oldlenp) {
		/* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
		size_t currlen = MAXTHREADNAMESIZE - 1;

		if(ut->pth_name)
			/* use length of current thread name */
			currlen = strlen(ut->pth_name);
		if(oldp) {
			if(*oldlenp < currlen)
				return ENOMEM;
			/* NOTE - we do not copy the NULL terminator */
			if(ut->pth_name) {
				error = copyout(ut->pth_name,oldp,currlen);
				if(error)
					return error;
			}
		}
		/* return length of thread name minus NULL terminator (just like strlen)  */
		req->oldidx = currlen;
	}

	/* We want to set the name to something */
	if(newp)
	{
		/* keep room for the terminating NUL within MAXTHREADNAMESIZE */
		if(newlen > (MAXTHREADNAMESIZE - 1))
			return ENAMETOOLONG;
		if(!ut->pth_name)
		{
			/* first set on this thread: allocate the buffer lazily */
			ut->pth_name = (char*)kalloc( MAXTHREADNAMESIZE );
			if(!ut->pth_name)
				return ENOMEM;
		}
		/* zero-fill so the copied-in bytes are always NUL-terminated */
		bzero(ut->pth_name, MAXTHREADNAMESIZE);
		error = copyin(newp, ut->pth_name, newlen);
		if(error)
			return error;
	}

	return 0;
}
393
394SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname,"A","");
395
396#define BSD_HOST 1
397STATIC int
398sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
399{
400 host_basic_info_data_t hinfo;
401 kern_return_t kret;
402 uint32_t size;
403 int changed;
404 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
405 struct _processor_statistics_np *buf;
406 int error;
407
408 kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
409 if (kret != KERN_SUCCESS) {
410 return EINVAL;
411 }
412
413 size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */
414
415 if (req->oldlen < size) {
416 return EINVAL;
417 }
418
419 MALLOC(buf, struct _processor_statistics_np*, size, M_TEMP, M_ZERO | M_WAITOK);
420
421 kret = get_sched_statistics(buf, &size);
422 if (kret != KERN_SUCCESS) {
423 error = EINVAL;
424 goto out;
425 }
426
427 error = sysctl_io_opaque(req, buf, size, &changed);
428 if (error) {
429 goto out;
430 }
431
432 if (changed) {
433 panic("Sched info changed?!");
434 }
435out:
436 FREE(buf, M_TEMP);
437 return error;
438}
439
440SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");
441
442STATIC int
443sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
444{
445 boolean_t active;
446 int res;
447
448 if (req->newlen != sizeof(active)) {
449 return EINVAL;
450 }
451
452 res = copyin(req->newptr, &active, sizeof(active));
453 if (res != 0) {
454 return res;
455 }
456
457 return set_sched_stats_active(active);
458}
459
460SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
461
462extern int get_kernel_symfile(proc_t, char **);
463
464#if COUNT_SYSCALLS
465#define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
466
467extern int nsysent;
468extern int syscalls_log[];
469extern const char *syscallnames[];
470
/*
 * kern.count_calls handler (COUNT_SYSCALLS builds only): control
 * per-syscall call counting.
 *
 * Accepted values (written via e.g. `sysctl -w kern.count_calls=N`):
 *   1 - enable counting
 *   2 - dump non-zero counts to the system log (counting stays enabled)
 *   3 - reset all counts (counting stays enabled)
 *   0 - reset all counts
 *
 * NOTE(review): the leading comment says 0 should disable counting,
 * but the code below never assigns do_count_syscalls = 0 in the
 * tmp == 0 path — it only clears the counters.  Behavior and comment
 * disagree; confirm which is intended before changing either.
 */
STATIC int
sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	__unused int *name = arg1;		/* oid element argument vector */
	__unused int namelen = arg2;	/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
	user_addr_t newp = req->newptr;	/* user buffer copy in address */
	size_t newlen = req->newlen;	/* user buffer copy in size */
	int error;

	int tmp;

	/* valid values passed in:
	 * = 0 means don't keep called counts for each bsd syscall
	 * > 0 means keep called counts for each bsd syscall
	 * = 2 means dump current counts to the system log
	 * = 3 means reset all counts
	 * for example, to dump current counts:
	 *		sysctl -w kern.count_calls=2
	 */
	error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
	if ( error != 0 ) {
		return (error);
	}

	if ( tmp == 1 ) {
		do_count_syscalls = 1;
	}
	else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
		int			i;
		/* walk the syscall table: dump (tmp==2) or clear (tmp==0/3) */
		for ( i = 0; i < nsysent; i++ ) {
			if ( syscalls_log[i] != 0 ) {
				if ( tmp == 2 ) {
					printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
				}
				else {
					syscalls_log[i] = 0;
				}
			}
		}
		if ( tmp != 0 ) {
			do_count_syscalls = 1;
		}
	}

	/* adjust index so we return the right required/consumed amount */
	if (!error)
		req->oldidx += req->oldlen;

	return (error);
}
524SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
525 0, /* Pointer argument (arg1) */
526 0, /* Integer argument (arg2) */
527 sysctl_docountsyscalls, /* Handler function */
528 NULL, /* Data pointer */
529 "");
530#endif /* COUNT_SYSCALLS */
531
532/*
533 * The following sysctl_* functions should not be used
534 * any more, as they can only cope with callers in
535 * user mode: Use new-style
536 * sysctl_io_number()
537 * sysctl_io_string()
538 * sysctl_io_opaque()
539 * instead.
540 */
541
542/*
543 * Validate parameters and get old / set new parameters
544 * for an integer-valued sysctl function.
545 */
546int
547sysctl_int(user_addr_t oldp, size_t *oldlenp,
548 user_addr_t newp, size_t newlen, int *valp)
549{
550 int error = 0;
551
552 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
553 return (EFAULT);
554 if (oldp && *oldlenp < sizeof(int))
555 return (ENOMEM);
556 if (newp && newlen != sizeof(int))
557 return (EINVAL);
558 *oldlenp = sizeof(int);
559 if (oldp)
560 error = copyout(valp, oldp, sizeof(int));
561 if (error == 0 && newp) {
562 error = copyin(newp, valp, sizeof(int));
563 AUDIT_ARG(value32, *valp);
564 }
565 return (error);
566}
567
568/*
569 * Validate parameters and get old / set new parameters
570 * for an quad(64bit)-valued sysctl function.
571 */
572int
573sysctl_quad(user_addr_t oldp, size_t *oldlenp,
574 user_addr_t newp, size_t newlen, quad_t *valp)
575{
576 int error = 0;
577
578 if (oldp != USER_ADDR_NULL && oldlenp == NULL)
579 return (EFAULT);
580 if (oldp && *oldlenp < sizeof(quad_t))
581 return (ENOMEM);
582 if (newp && newlen != sizeof(quad_t))
583 return (EINVAL);
584 *oldlenp = sizeof(quad_t);
585 if (oldp)
586 error = copyout(valp, oldp, sizeof(quad_t));
587 if (error == 0 && newp)
588 error = copyin(newp, valp, sizeof(quad_t));
589 return (error);
590}
591
592STATIC int
593sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
594{
595 if (p->p_pid != (pid_t)*(int*)arg)
596 return(0);
597 else
598 return(1);
599}
600
601STATIC int
602sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
603{
604 if (p->p_pgrpid != (pid_t)*(int*)arg)
605 return(0);
606 else
607 return(1);
608}
609
610STATIC int
611sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
612{
613 int retval;
614 struct tty *tp;
615
616 /* This is very racy but list lock is held.. Hmmm. */
617 if ((p->p_flag & P_CONTROLT) == 0 ||
618 (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
619 (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
620 tp->t_dev != (dev_t)*(int*)arg)
621 retval = 0;
622 else
623 retval = 1;
624
625 return(retval);
626}
627
628STATIC int
629sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
630{
631 kauth_cred_t my_cred;
632 uid_t uid;
633
634 if (p->p_ucred == NULL)
635 return(0);
636 my_cred = kauth_cred_proc_ref(p);
637 uid = kauth_cred_getuid(my_cred);
638 kauth_cred_unref(&my_cred);
639
640 if (uid != (uid_t)*(int*)arg)
641 return(0);
642 else
643 return(1);
644}
645
646
647STATIC int
648sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
649{
650 kauth_cred_t my_cred;
651 uid_t ruid;
652
653 if (p->p_ucred == NULL)
654 return(0);
655 my_cred = kauth_cred_proc_ref(p);
656 ruid = kauth_cred_getruid(my_cred);
657 kauth_cred_unref(&my_cred);
658
659 if (ruid != (uid_t)*(int*)arg)
660 return(0);
661 else
662 return(1);
663}
664
665#if CONFIG_LCTX
666STATIC int
667sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg)
668{
669 if ((p->p_lctx == NULL) ||
670 (p->p_lctx->lc_id != (pid_t)*(int*)arg))
671 return(0);
672 else
673 return(1);
674}
675#endif
676
677/*
678 * try over estimating by 5 procs
679 */
680#define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc))
681struct sysdoproc_args {
682 int buflen;
683 void *kprocp;
684 boolean_t is_64_bit;
685 user_addr_t dp;
686 size_t needed;
687 int sizeof_kproc;
688 int *errorp;
689 int uidcheck;
690 int ruidcheck;
691 int ttycheck;
692 int uidval;
693};
694
/*
 * Per-process callback for proc_iterate(): apply the uid/ruid/tty
 * filters, fill a kinfo_proc record in the caller-selected layout, and
 * copy it out to user space.  'needed' is accumulated for every
 * matching process even after the buffer fills, so the caller can
 * report the required size.  On copyout failure the error is stored
 * through args->errorp and iteration stops.
 */
int
sysdoproc_callback(proc_t p, void *arg)
{
	struct sysdoproc_args *args = arg;

	if (args->buflen >= args->sizeof_kproc) {
		if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0))
			return (PROC_RETURNED);
		if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0))
			return (PROC_RETURNED);
		if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0))
			return (PROC_RETURNED);

		/* zero the scratch record so fill_* only sets non-zero fields */
		bzero(args->kprocp, args->sizeof_kproc);
		if (args->is_64_bit)
			fill_user64_proc(p, args->kprocp);
		else
			fill_user32_proc(p, args->kprocp);
		int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
		if (error) {
			*args->errorp = error;
			return (PROC_RETURNED_DONE);
		}
		args->dp += args->sizeof_kproc;
		args->buflen -= args->sizeof_kproc;
	}
	/* counted regardless of buffer space, to report the size needed */
	args->needed += args->sizeof_kproc;
	return (PROC_RETURNED);
}
724
725SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
/*
 * Common handler for the kern.proc.* subtree (all/pid/pgrp/tty/uid/
 * ruid/lcid).  The subcommand arrives in oidp->oid_arg2 and the filter
 * argument (pid, pgrp id, tty dev, uid, ...) in name[0].
 *
 * Walks the allproc and zombie lists via proc_iterate(), copying one
 * kinfo_proc record per matching process into the caller's buffer in
 * the layout matching the caller's ABI (32- or 64-bit).  With a NULL
 * output buffer, reports the required size (padded by KERN_PROCSLOP)
 * instead.
 *
 * Returns 0, EINVAL on a malformed OID, ENOTSUP for an unknown
 * subcommand, ENOMEM when the supplied buffer was too small, or a
 * copyout error from the callback.
 */
STATIC int
sysctl_prochandle SYSCTL_HANDLER_ARGS
{
	int cmd = oidp->oid_arg2;	/* subcommand for multiple nodes */
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t where = req->oldptr;/* user buffer copy out address */

	user_addr_t dp = where;
	size_t needed = 0;
	int buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
	int error = 0;
	boolean_t is_64_bit = proc_is64bit(current_proc());
	struct user32_kinfo_proc  user32_kproc;
	struct user64_kinfo_proc  user_kproc;
	int sizeof_kproc;
	void *kprocp;
	int (*filterfn)(proc_t, void *) = 0;
	struct sysdoproc_args args;
	int uidcheck = 0;
	int ruidcheck = 0;
	int ttycheck = 0;

	/* every subcommand except KERN_PROC_ALL takes exactly one argument */
	if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL))
		return (EINVAL);

	/* pick the record layout matching the calling process's ABI */
	if (is_64_bit) {
		sizeof_kproc = sizeof(user_kproc);
		kprocp = &user_kproc;
	} else {
		sizeof_kproc = sizeof(user32_kproc);
		kprocp = &user32_kproc;
	}

	/*
	 * pid/pgrp/lcid filters run inside proc_iterate(); the uid, ruid
	 * and tty checks are deferred to the callback because they need a
	 * credential/session reference per process.
	 */
	switch (cmd) {

		case KERN_PROC_PID:
			filterfn = sysdoproc_filt_KERN_PROC_PID;
			break;

		case KERN_PROC_PGRP:
			filterfn = sysdoproc_filt_KERN_PROC_PGRP;
			break;

		case KERN_PROC_TTY:
			ttycheck = 1;
			break;

		case KERN_PROC_UID:
			uidcheck = 1;
			break;

		case KERN_PROC_RUID:
			ruidcheck = 1;
			break;

#if CONFIG_LCTX
		case KERN_PROC_LCID:
			filterfn = sysdoproc_filt_KERN_PROC_LCID;
			break;
#endif
		case KERN_PROC_ALL:
			break;

		default:
			/* must be kern.proc.<unknown> */
			return (ENOTSUP);
	}

	error = 0;
	args.buflen = buflen;
	args.kprocp = kprocp;
	args.is_64_bit = is_64_bit;
	args.dp = dp;
	args.needed = needed;
	args.errorp = &error;
	args.uidcheck = uidcheck;
	args.ruidcheck = ruidcheck;
	args.ttycheck = ttycheck;
	args.sizeof_kproc = sizeof_kproc;
	if (namelen)
		args.uidval = name[0];

	proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
	    sysdoproc_callback, &args, filterfn, name);

	if (error)
		return (error);

	dp = args.dp;
	needed = args.needed;

	if (where != USER_ADDR_NULL) {
		req->oldlen = dp - where;
		if (needed > req->oldlen)
			return (ENOMEM);
	} else {
		/* size probe: pad the estimate to absorb process churn */
		needed += KERN_PROCSLOP;
		req->oldlen = needed;
	}
	/* adjust index so we return the right required/consumed amount */
	req->oldidx += req->oldlen;
	return (0);
}
830
831/*
832 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
833 * in the sysctl declaration itself, which comes into the handler function
834 * as 'oidp->oid_arg2'.
835 *
836 * For these particular sysctls, since they have well known OIDs, we could
837 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
838 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
839 * of a well known value with a common handler function. This is desirable,
840 * because we want well known values to "go away" at some future date.
841 *
 842 * It should be noted that the value of '((int *)arg1)[1]' is used as
 843 * an integer parameter to the subcommand for many of these sysctls; we'd
 844 * rather have used '((int *)arg1)[0]' for that, or even better, an element
 845 * in a structure passed in as the 'newp' argument to sysctlbyname(3),
846 * and then use leaf-node permissions enforcement, but that would have
847 * necessitated modifying user space code to correspond to the interface
848 * change, and we are striving for binary backward compatibility here; even
849 * though these are SPI, and not intended for use by user space applications
850 * which are not themselves system tools or libraries, some applications
851 * have erroneously used them.
852 */
853SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
854 0, /* Pointer argument (arg1) */
855 KERN_PROC_ALL, /* Integer argument (arg2) */
856 sysctl_prochandle, /* Handler function */
857 NULL, /* Data is size variant on ILP32/LP64 */
858 "");
859SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
860 0, /* Pointer argument (arg1) */
861 KERN_PROC_PID, /* Integer argument (arg2) */
862 sysctl_prochandle, /* Handler function */
863 NULL, /* Data is size variant on ILP32/LP64 */
864 "");
865SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
866 0, /* Pointer argument (arg1) */
867 KERN_PROC_TTY, /* Integer argument (arg2) */
868 sysctl_prochandle, /* Handler function */
869 NULL, /* Data is size variant on ILP32/LP64 */
870 "");
871SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
872 0, /* Pointer argument (arg1) */
873 KERN_PROC_PGRP, /* Integer argument (arg2) */
874 sysctl_prochandle, /* Handler function */
875 NULL, /* Data is size variant on ILP32/LP64 */
876 "");
877SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
878 0, /* Pointer argument (arg1) */
879 KERN_PROC_UID, /* Integer argument (arg2) */
880 sysctl_prochandle, /* Handler function */
881 NULL, /* Data is size variant on ILP32/LP64 */
882 "");
883SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
884 0, /* Pointer argument (arg1) */
885 KERN_PROC_RUID, /* Integer argument (arg2) */
886 sysctl_prochandle, /* Handler function */
887 NULL, /* Data is size variant on ILP32/LP64 */
888 "");
889SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
890 0, /* Pointer argument (arg1) */
891 KERN_PROC_LCID, /* Integer argument (arg2) */
892 sysctl_prochandle, /* Handler function */
893 NULL, /* Data is size variant on ILP32/LP64 */
894 "");
895
896
897/*
898 * Fill in non-zero fields of an eproc structure for the specified process.
899 */
/*
 * Fill in non-zero fields of an eproc structure for the specified process.
 *
 * Caller has bzeroed the destination, so only non-zero fields are set.
 * Process-group and session references are taken up front and released
 * on exit; the credential reference is held only around the pcred/ucred
 * snapshot.
 */
STATIC void
fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
{
	struct tty *tp;
	struct pgrp *pg;
	struct session *sessp;
	kauth_cred_t my_cred;

	pg = proc_pgrp(p);
	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	}
#if CONFIG_LCTX
	if (p->p_lctx)
		ep->e_lcid = p->p_lctx->lc_id;
#endif
	ep->e_ppid = p->p_ppid;
	if (p->p_ucred) {
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
		ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
		ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
		ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
		bcopy(posix_cred_get(my_cred)->cr_groups,
		    ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));

		kauth_cred_unref(&my_cred);
	}

	/* controlling terminal: report device and foreground pgrp, else NODEV */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	     (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
	} else
		ep->e_tdev = NODEV;

	if (sessp != SESSION_NULL) {
		if (SESS_LEADER(p, sessp))
			ep->e_flag |= EPROC_SLEADER;
		session_rele(sessp);
	}
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
956
/*
 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
 * The caller supplies a zeroed *ep, so only fields with non-zero values
 * are written here (64-bit user layout; mirrors fill_user32_eproc).
 */
STATIC void
fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
{
	struct tty *tp;
	struct pgrp *pg;
	struct session *sessp;
	kauth_cred_t my_cred;

	/* Both lookups take a reference; each is dropped before returning. */
	pg = proc_pgrp(p);
	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		/* A session holding a controlling tty vnode marks the process. */
		if (sessp != SESSION_NULL && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	}
#if CONFIG_LCTX
	if (p->p_lctx)
		ep->e_lcid = p->p_lctx->lc_id;
#endif
	ep->e_ppid = p->p_ppid;
	if (p->p_ucred) {
		/* Take our own credential reference while copying fields. */
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
		ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
		ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
		ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
		bcopy(posix_cred_get(my_cred)->cr_groups,
			ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));

		kauth_cred_unref(&my_cred);
	}

	/* Controlling terminal info only if the session still has one. */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	     (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
	} else
		ep->e_tdev = NODEV;

	if (sessp != SESSION_NULL) {
		if (SESS_LEADER(p, sessp))
			ep->e_flag |= EPROC_SLEADER;
		session_rele(sessp);
	}
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
1016
/*
 * Fill in an extern_proc structure for the specified process
 * (32-bit user layout; time fields are narrowed to 32-bit types).
 * bzeroed by our caller, so only set non-zero fields.
 */
STATIC void
fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
{
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_flag = p->p_flag;
	/* Fold the relevant p_lflag bits into the historical p_flag word. */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	/* Mach related  */
	exp->user_stack = p->user_stack;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
#endif
	/* Timer values are truncated to the 32-bit user representation. */
	exp->p_realtimer.it_interval.tv_sec =
		(user32_time_t)p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec =
		(__int32_t)p->p_realtimer.it_interval.tv_usec;

	exp->p_realtimer.it_value.tv_sec =
		(user32_time_t)p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec =
		(__int32_t)p->p_realtimer.it_value.tv_usec;

	exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;

	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
}
1067
/*
 * Fill in an LP64 version of extern_proc structure for the specified process
 * (64-bit user layout; time fields are copied without narrowing).
 * bzeroed by our caller, so only set non-zero fields.
 */
STATIC void
fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
{
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_flag = p->p_flag;
	/* Fold the relevant p_lflag bits into the historical p_flag word. */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	/* Mach related  */
	exp->user_stack = p->user_stack;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
#endif
	exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;

	exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;

	exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = p->p_rtime.tv_usec;

	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
}
1113
1114STATIC void
1115fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
1116{
1117 /* on a 64 bit kernel, 32 bit users get some truncated information */
1118 fill_user32_externproc(p, &kp->kp_proc);
1119 fill_user32_eproc(p, &kp->kp_eproc);
1120}
1121
1122STATIC void
1123fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
1124{
1125 fill_user64_externproc(p, &kp->kp_proc);
1126 fill_user64_eproc(p, &kp->kp_eproc);
1127}
1128
/*
 * Handler for the kern.kdebug node: authenticates the caller and forwards
 * the kdebug subcommand vector to kdbg_control().
 */
STATIC int
sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
//	size_t newlen = req->newlen;	/* user buffer copy in size */

	proc_t p = current_proc();
	int ret=0;

	if (namelen == 0)
		return(ENOTSUP);

	/* Normally requires superuser ... */
	ret = suser(kauth_cred_get(), &p->p_acflag);
#if KPERF
	/* Non-root processes may be blessed by kperf to access data
	 * logged into trace.
	 */
	if (ret)
		ret = kperf_access_check();
#endif /* KPERF */
	if (ret)
		return(ret);

	/* Only recognized kdebug subcommands are forwarded to kdbg_control. */
	switch(name[0]) {
	case KERN_KDEFLAGS:
	case KERN_KDDFLAGS:
	case KERN_KDENABLE:
	case KERN_KDGETBUF:
	case KERN_KDSETUP:
	case KERN_KDREMOVE:
	case KERN_KDSETREG:
	case KERN_KDGETREG:
	case KERN_KDREADTR:
	case KERN_KDWRITETR:
	case KERN_KDWRITEMAP:
	case KERN_KDPIDTR:
	case KERN_KDTHRMAP:
	case KERN_KDPIDEX:
	case KERN_KDSETRTCDEC:
	case KERN_KDSETBUF:
	case KERN_KDGETENTROPY:
	case KERN_KDENABLE_BG_TRACE:
	case KERN_KDDISABLE_BG_TRACE:
	case KERN_KDREADCURTHRMAP:
	case KERN_KDSET_TYPEFILTER:
	case KERN_KDBUFWAIT:
	case KERN_KDCPUMAP:

	        ret = kdbg_control(name, namelen, oldp, oldlenp);
	        break;
	default:
		ret= ENOTSUP;
		break;
	}

	/* adjust index so we return the right required/consumed amount */
	if (!ret)
		req->oldidx += req->oldlen;

	return (ret);
}
/* kern.kdebug — node dispatched entirely through sysctl_kdebug_ops. */
SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_kdebug_ops,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1201
1202
1203/*
1204 * Return the top *sizep bytes of the user stack, or the entire area of the
1205 * user stack down through the saved exec_path, whichever is smaller.
1206 */
1207STATIC int
1208sysctl_doprocargs SYSCTL_HANDLER_ARGS
1209{
1210 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1211 int *name = arg1; /* oid element argument vector */
1212 int namelen = arg2; /* number of oid element arguments */
1213 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1214 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1215// user_addr_t newp = req->newptr; /* user buffer copy in address */
1216// size_t newlen = req->newlen; /* user buffer copy in size */
1217 int error;
1218
1219 error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 0);
1220
1221 /* adjust index so we return the right required/consumed amount */
1222 if (!error)
1223 req->oldidx += req->oldlen;
1224
1225 return (error);
1226}
/* kern.procargs — legacy argument-area dump (no argc word). */
SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_doprocargs,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1233
1234STATIC int
1235sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1236{
1237 __unused int cmd = oidp->oid_arg2; /* subcommand*/
1238 int *name = arg1; /* oid element argument vector */
1239 int namelen = arg2; /* number of oid element arguments */
1240 user_addr_t oldp = req->oldptr; /* user buffer copy out address */
1241 size_t *oldlenp = &req->oldlen; /* user buffer copy out size */
1242// user_addr_t newp = req->newptr; /* user buffer copy in address */
1243// size_t newlen = req->newlen; /* user buffer copy in size */
1244 int error;
1245
1246 error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 1);
1247
1248 /* adjust index so we return the right required/consumed amount */
1249 if (!error)
1250 req->oldidx += req->oldlen;
1251
1252 return (error);
1253}
/* kern.procargs2 — argument-area dump with argc as the leading word. */
SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_doprocargs2,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1260
1261STATIC int
1262sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
1263 size_t *sizep, proc_t cur_proc, int argc_yes)
1264{
1265 proc_t p;
1266 int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1267 int error = 0;
1268 struct _vm_map *proc_map;
1269 struct task * task;
1270 vm_map_copy_t tmp;
1271 user_addr_t arg_addr;
1272 size_t arg_size;
1273 caddr_t data;
1274 size_t argslen=0;
1275 int size;
1276 vm_offset_t copy_start, copy_end;
1277 kern_return_t ret;
1278 int pid;
1279 kauth_cred_t my_cred;
1280 uid_t uid;
1281
1282 if ( namelen < 1 )
1283 return(EINVAL);
1284
1285 if (argc_yes)
1286 buflen -= sizeof(int); /* reserve first word to return argc */
1287
1288 /* we only care about buflen when where (oldp from sysctl) is not NULL. */
1289 /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1290 /* is not NULL then the caller wants us to return the length needed to */
1291 /* hold the data we would return */
1292 if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1293 return(EINVAL);
1294 }
1295 arg_size = buflen;
1296
1297 /*
1298 * Lookup process by pid
1299 */
1300 pid = name[0];
1301 p = proc_find(pid);
1302 if (p == NULL) {
1303 return(EINVAL);
1304 }
1305
1306 /*
1307 * Copy the top N bytes of the stack.
1308 * On all machines we have so far, the stack grows
1309 * downwards.
1310 *
1311 * If the user expects no more than N bytes of
1312 * argument list, use that as a guess for the
1313 * size.
1314 */
1315
1316 if (!p->user_stack) {
1317 proc_rele(p);
1318 return(EINVAL);
1319 }
1320
1321 if (where == USER_ADDR_NULL) {
1322 /* caller only wants to know length of proc args data */
1323 if (sizep == NULL) {
1324 proc_rele(p);
1325 return(EFAULT);
1326 }
1327
1328 size = p->p_argslen;
1329 proc_rele(p);
1330 if (argc_yes) {
1331 size += sizeof(int);
1332 }
1333 else {
1334 /*
1335 * old PROCARGS will return the executable's path and plus some
1336 * extra space for work alignment and data tags
1337 */
1338 size += PATH_MAX + (6 * sizeof(int));
1339 }
1340 size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1341 *sizep = size;
1342 return (0);
1343 }
1344
1345 my_cred = kauth_cred_proc_ref(p);
1346 uid = kauth_cred_getuid(my_cred);
1347 kauth_cred_unref(&my_cred);
1348
1349 if ((uid != kauth_cred_getuid(kauth_cred_get()))
1350 && suser(kauth_cred_get(), &cur_proc->p_acflag)) {
1351 proc_rele(p);
1352 return (EINVAL);
1353 }
1354
1355 if ((u_int)arg_size > p->p_argslen)
1356 arg_size = round_page(p->p_argslen);
1357
1358 arg_addr = p->user_stack - arg_size;
1359
1360
1361 /*
1362 * Before we can block (any VM code), make another
1363 * reference to the map to keep it alive. We do
1364 * that by getting a reference on the task itself.
1365 */
1366 task = p->task;
1367 if (task == NULL) {
1368 proc_rele(p);
1369 return(EINVAL);
1370 }
1371
1372 argslen = p->p_argslen;
1373 /*
1374 * Once we have a task reference we can convert that into a
1375 * map reference, which we will use in the calls below. The
1376 * task/process may change its map after we take this reference
1377 * (see execve), but the worst that will happen then is a return
1378 * of stale info (which is always a possibility).
1379 */
1380 task_reference(task);
1381 proc_rele(p);
1382 proc_map = get_task_map_reference(task);
1383 task_deallocate(task);
1384
1385 if (proc_map == NULL)
1386 return(EINVAL);
1387
1388
1389 ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
1390 if (ret != KERN_SUCCESS) {
1391 vm_map_deallocate(proc_map);
1392 return(ENOMEM);
1393 }
1394
1395 copy_end = round_page(copy_start + arg_size);
1396
1397 if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
1398 (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
1399 vm_map_deallocate(proc_map);
1400 kmem_free(kernel_map, copy_start,
1401 round_page(arg_size));
1402 return (EIO);
1403 }
1404
1405 /*
1406 * Now that we've done the copyin from the process'
1407 * map, we can release the reference to it.
1408 */
1409 vm_map_deallocate(proc_map);
1410
1411 if( vm_map_copy_overwrite(kernel_map,
1412 (vm_map_address_t)copy_start,
1413 tmp, FALSE) != KERN_SUCCESS) {
1414 kmem_free(kernel_map, copy_start,
1415 round_page(arg_size));
1416 return (EIO);
1417 }
1418
1419 if (arg_size > argslen) {
1420 data = (caddr_t) (copy_end - argslen);
1421 size = argslen;
1422 } else {
1423 data = (caddr_t) (copy_end - arg_size);
1424 size = arg_size;
1425 }
1426
1427 if (argc_yes) {
1428 /* Put processes argc as the first word in the copyout buffer */
1429 suword(where, p->p_argc);
1430 error = copyout(data, (where + sizeof(int)), size);
1431 size += sizeof(int);
1432 } else {
1433 error = copyout(data, where, size);
1434
1435 /*
1436 * Make the old PROCARGS work to return the executable's path
1437 * But, only if there is enough space in the provided buffer
1438 *
1439 * on entry: data [possibily] points to the beginning of the path
1440 *
1441 * Note: we keep all pointers&sizes aligned to word boundries
1442 */
1443 if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
1444 {
1445 int binPath_sz, alignedBinPath_sz = 0;
1446 int extraSpaceNeeded, addThis;
1447 user_addr_t placeHere;
1448 char * str = (char *) data;
1449 int max_len = size;
1450
1451 /* Some apps are really bad about messing up their stacks
1452 So, we have to be extra careful about getting the length
1453 of the executing binary. If we encounter an error, we bail.
1454 */
1455
1456 /* Limit ourselves to PATH_MAX paths */
1457 if ( max_len > PATH_MAX ) max_len = PATH_MAX;
1458
1459 binPath_sz = 0;
1460
1461 while ( (binPath_sz < max_len-1) && (*str++ != 0) )
1462 binPath_sz++;
1463
1464 /* If we have a NUL terminator, copy it, too */
1465 if (binPath_sz < max_len-1) binPath_sz += 1;
1466
1467 /* Pre-Flight the space requiremnts */
1468
1469 /* Account for the padding that fills out binPath to the next word */
1470 alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;
1471
1472 placeHere = where + size;
1473
1474 /* Account for the bytes needed to keep placeHere word aligned */
1475 addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;
1476
1477 /* Add up all the space that is needed */
1478 extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
1479
1480 /* is there is room to tack on argv[0]? */
1481 if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
1482 {
1483 placeHere += addThis;
1484 suword(placeHere, 0);
1485 placeHere += sizeof(int);
1486 suword(placeHere, 0xBFFF0000);
1487 placeHere += sizeof(int);
1488 suword(placeHere, 0);
1489 placeHere += sizeof(int);
1490 error = copyout(data, placeHere, binPath_sz);
1491 if ( ! error )
1492 {
1493 placeHere += binPath_sz;
1494 suword(placeHere, 0);
1495 size += extraSpaceNeeded;
1496 }
1497 }
1498 }
1499 }
1500
1501 if (copy_start != (vm_offset_t) 0) {
1502 kmem_free(kernel_map, copy_start, copy_end - copy_start);
1503 }
1504 if (error) {
1505 return(error);
1506 }
1507
1508 if (where != USER_ADDR_NULL)
1509 *sizep = size;
1510 return (0);
1511}
1512
1513
1514/*
1515 * Max number of concurrent aio requests
1516 */
1517STATIC int
1518sysctl_aiomax
1519(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1520{
1521 int new_value, changed;
1522 int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
1523 if (changed) {
1524 /* make sure the system-wide limit is greater than the per process limit */
1525 if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS)
1526 aio_max_requests = new_value;
1527 else
1528 error = EINVAL;
1529 }
1530 return(error);
1531}
1532
1533
1534/*
1535 * Max number of concurrent aio requests per process
1536 */
1537STATIC int
1538sysctl_aioprocmax
1539(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1540{
1541 int new_value, changed;
1542 int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
1543 if (changed) {
1544 /* make sure per process limit is less than the system-wide limit */
1545 if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
1546 aio_max_requests_per_process = new_value;
1547 else
1548 error = EINVAL;
1549 }
1550 return(error);
1551}
1552
1553
1554/*
1555 * Max number of async IO worker threads
1556 */
1557STATIC int
1558sysctl_aiothreads
1559(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1560{
1561 int new_value, changed;
1562 int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
1563 if (changed) {
1564 /* we only allow an increase in the number of worker threads */
1565 if (new_value > aio_worker_threads ) {
1566 _aio_create_worker_threads((new_value - aio_worker_threads));
1567 aio_worker_threads = new_value;
1568 }
1569 else
1570 error = EINVAL;
1571 }
1572 return(error);
1573}
1574
1575
1576/*
1577 * System-wide limit on the max number of processes
1578 */
1579STATIC int
1580sysctl_maxproc
1581(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1582{
1583 int new_value, changed;
1584 int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
1585 if (changed) {
1586 AUDIT_ARG(value32, new_value);
1587 /* make sure the system-wide limit is less than the configured hard
1588 limit set at kernel compilation */
1589 if (new_value <= hard_maxproc && new_value > 0)
1590 maxproc = new_value;
1591 else
1592 error = EINVAL;
1593 }
1594 return(error);
1595}
1596
/* Read-only kernel identification strings and revision numbers. */
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		version, 0, "");
SYSCTL_STRING(_kern, OID_AUTO, uuid,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&kernel_uuid_string[0], 0, "");
1612
#if DEBUG
/* Non-zero enables kprintf syscall tracing (DEBUG kernels only). */
int debug_kprint_syscall = 0;
/* Optional process-name filter for the above; empty means all processes. */
char debug_kprint_syscall_process[MAXCOMLEN+1];

/* Thread safe: bits and string value are not used to reclaim state */
SYSCTL_INT (_debug, OID_AUTO, kprint_syscall,
	   CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process, 
			  CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
			  "name of process for kprintf syscall tracing");

/*
 * Decide whether the current process's syscalls should be traced.
 * Returns 1 to trace, 0 otherwise.  When tracing all processes, *namep
 * receives the process name to log; when the user has scoped tracing to
 * one process name, *namep is set to NULL (nothing useful to report).
 */
int debug_kprint_current_process(const char **namep)
{
	struct proc *p = current_proc();

	if (p == NULL) {
		return 0;
	}

	if (debug_kprint_syscall_process[0]) {
		/* user asked to scope tracing to a particular process name */
		if(0 == strncmp(debug_kprint_syscall_process,
						p->p_comm, sizeof(debug_kprint_syscall_process))) {
			/* no value in telling the user that we traced what they asked */
			if(namep) *namep = NULL;
			
			return 1;
		} else {
			return 0;
		}
	}

	/* trace all processes. Tell user what we traced */
	if (namep) {
		*namep = p->p_comm;
	}

	return 1;
}
#endif
1653
1654/* PR-5293665: need to use a callback function for kern.osversion to set
1655 * osversion in IORegistry */
1656
1657STATIC int
1658sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1659{
1660 int rval = 0;
1661
1662 rval = sysctl_handle_string(oidp, arg1, arg2, req);
1663
1664 if (req->newptr) {
1665 IORegistrySetOSBuildVersion((char *)arg1);
1666 }
1667
1668 return rval;
1669}
1670
/* kern.osversion — backed by the osversion[] buffer via the callback above. */
SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
        CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
        osversion, 256 /* OSVERSIZE*/, 
        sysctl_osversion, "A", "");
1675
1676STATIC int
1677sysctl_sysctl_bootargs
1678(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1679{
1680 int error;
1681 char buf[256];
1682
1683 strlcpy(buf, PE_boot_args(), 256);
1684 error = sysctl_io_string(req, buf, 256, 0, NULL);
1685 return(error);
1686}
1687
/* kern.bootargs plus assorted static limits and live object counters. */
SYSCTL_PROC(_kern, OID_AUTO, bootargs,
	CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
	NULL, 0,
	sysctl_sysctl_bootargs, "A", "bootargs");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, 1, "");
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		NULL, 0, "");
#endif
SYSCTL_INT(_kern, OID_AUTO, num_files,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&nfiles, 0, "");
SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&numvnodes, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_tasks,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&task_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_threads,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&thread_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&task_threadmax, 0, "");
1732
/*
 * kern.maxvnodes — target number of vnodes; resizes the VM object cache
 * and the name cache when the value changes.
 */
STATIC int
sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int oldval = desiredvnodes;
	/* NOTE(review): sysctl_io_number writes the new value directly into
	 * desiredvnodes, so the caches below are resized even when the call
	 * also reports an error — confirm this is intended. */
	int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);

	if (oldval != desiredvnodes) {
		reset_vmobjectcache(oldval, desiredvnodes);
		resize_namecache(desiredvnodes);
	}

	return(error);
}
1746
/* Name-cache switch plus the validated limit handlers declared above. */
SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
		CTLFLAG_RW | CTLFLAG_LOCKED,
		&nc_disabled, 0, "");

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_maxvnodes, "I", "");

SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aiothreads, "I", "");

#if (DEVELOPMENT || DEBUG)
extern int sched_smt_balance;
SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance, 
	       CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED, 
	       &sched_smt_balance, 0, "");
#endif
1777
1778STATIC int
1779sysctl_securelvl
1780(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1781{
1782 int new_value, changed;
1783 int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
1784 if (changed) {
1785 if (!(new_value < securelevel && req->p->p_pid != 1)) {
1786 proc_list_lock();
1787 securelevel = new_value;
1788 proc_list_unlock();
1789 } else {
1790 error = EPERM;
1791 }
1792 }
1793 return(error);
1794}
1795
/* kern.securelevel — validated by sysctl_securelvl above. */
SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_securelvl, "I", "");
1799
1800
1801STATIC int
1802sysctl_domainname
1803(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1804{
1805 int error, changed;
1806 error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
1807 if (changed) {
1808 domainnamelen = strlen(domainname);
1809 }
1810 return(error);
1811}
1812
/* NIS domain name and the legacy 32-bit host id. */
SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
		CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_domainname, "A", "");

SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&hostid, 0, "");
1820
1821STATIC int
1822sysctl_hostname
1823(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1824{
1825 int error, changed;
1826 error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
1827 if (changed) {
1828 hostnamelen = req->newlen;
1829 }
1830 return(error);
1831}
1832
1833
/* kern.hostname — read/write, handled by sysctl_hostname above. */
SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
		CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_hostname, "A", "");
1837
/*
 * kern.procname — exposes the calling process's p_name buffer.
 */
STATIC int
sysctl_procname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	/* Original code allowed writing, I'm copying this, although this all makes
	   no sense to me. Besides, this sysctl is never used. */
	return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
}
1846
/* kern.procname plus assorted VM paging/speculative-read tunables. */
SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
		CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
		0, 0, sysctl_procname, "A", "");

SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&speculative_reads_disabled, 0, "");

SYSCTL_INT(_kern, OID_AUTO, ignore_is_ssd,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&ignore_is_ssd, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&preheat_max_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&preheat_min_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&speculative_prefetch_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&speculative_prefetch_max_iosize, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_free_target, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_free_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_free_reserved, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_speculative_percentage, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_speculative_q_age_ms, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_max_delayed_work_limit, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_max_batch, 0, "");

SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&bootsessionuuid_string[0], sizeof(bootsessionuuid_string) , "");
1906
1907STATIC int
1908sysctl_boottime
1909(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1910{
1911 time_t tv_sec = boottime_sec();
1912 struct proc *p = req->p;
1913
1914 if (proc_is64bit(p)) {
1915 struct user64_timeval t;
1916 t.tv_sec = tv_sec;
1917 t.tv_usec = 0;
1918 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
1919 } else {
1920 struct user32_timeval t;
1921 t.tv_sec = tv_sec;
1922 t.tv_usec = 0;
1923 return sysctl_io_opaque(req, &t, sizeof(t), NULL);
1924 }
1925}
1926
/* kern.boottime — ABI-variant struct timeval, see sysctl_boottime. */
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
		CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_boottime, "S,timeval", "");
1930
1931STATIC int
1932sysctl_symfile
1933(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1934{
1935 char *str;
1936 int error = get_kernel_symfile(req->p, &str);
1937 if (error)
1938 return (error);
1939 return sysctl_io_string(req, str, 0, 0, NULL);
1940}
1941
1942
/* kern.symfile — read-only symbol-file path. */
SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
		CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_symfile, "A", "");
1946
#if NFSCLIENT
/*
 * kern.netboot — non-zero when the root filesystem was netbooted,
 * per netboot_root().
 */
STATIC int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_netboot, "I", "");
#endif
1959
#ifdef CONFIG_IMGSRC_ACCESS
/*
 * Legacy--act as if only one layer of nesting is possible.
 * Returns the dev_t of the device backing the first image-boot root
 * (superuser only).
 */
STATIC int
sysctl_imgsrcdev 
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	vfs_context_t ctx = vfs_context_current();
	vnode_t devvp;
	int result;

	if (!vfs_context_issuser(ctx)) {
		return EPERM;
	}

	if (imgsrc_rootvnodes[0] == NULL) {
		return ENOENT;
	}

	/* Hold the root vnode for the duration; released at 'out'. */
	result = vnode_getwithref(imgsrc_rootvnodes[0]);
	if (result != 0) {
		return result;
	}

	/* Also take a reference on the backing device vnode. */
	devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
	result = vnode_getwithref(devvp);
	if (result != 0) {
		goto out;
	}

	result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);

	vnode_put(devvp);
out:
	vnode_put(imgsrc_rootvnodes[0]);
	return result;
}
1998
1999SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
2000 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2001 0, 0, sysctl_imgsrcdev, "I", "");
2002
/*
 * kern.imgsrcinfo: per-nesting-level image-boot source information
 * (backing device, flags, height) for up to MAX_IMAGEBOOT_NESTING
 * levels; only the populated entries are copied out.
 */
STATIC int
sysctl_imgsrcinfo
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct imgsrc_info info[MAX_IMAGEBOOT_NESTING];	/* 2 for now, no problem */
	uint32_t i;
	vnode_t rvp, devvp;

	if (imgsrc_rootvnodes[0] == NULLVP) {
		return ENXIO;
	}

	for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
		/*
		 * Go get the root vnode.
		 */
		rvp = imgsrc_rootvnodes[i];
		if (rvp == NULLVP) {
			break;
		}

		error = vnode_get(rvp);
		if (error != 0) {
			return error;
		}

		/*
		 * For now, no getting at a non-local volume.
		 */
		devvp = vnode_mount(rvp)->mnt_devvp;
		if (devvp == NULL) {
			vnode_put(rvp);
			return EINVAL;
		}

		error = vnode_getwithref(devvp);
		if (error != 0) {
			vnode_put(rvp);
			return error;
		}

		/*
		 * Fill in info.
		 */
		info[i].ii_dev = vnode_specrdev(devvp);
		info[i].ii_flags = 0;
		info[i].ii_height = i;
		bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));

		vnode_put(devvp);
		vnode_put(rvp);
	}

	/* i is the number of entries actually populated above. */
	return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
	    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
	    0, 0, sysctl_imgsrcinfo, "I", "");

#endif /* CONFIG_IMGSRC_ACCESS */
2065
2066
SYSCTL_DECL(_kern_timer);
SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");


/* Global on/off switch for Mach timer coalescing. */
SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
		CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
		&mach_timer_coalescing_enabled, 0, "");

SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
		CTLFLAG_RW | CTLFLAG_LOCKED,
		&timer_deadline_tracking_bin_1, "");
SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
		CTLFLAG_RW | CTLFLAG_LOCKED,
		&timer_deadline_tracking_bin_2, "");

SYSCTL_DECL(_kern_timer_longterm);
SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");


/* Must match definition in osfmk/kern/timer_call.c */
/* Subcommand selectors passed to sysctl_timer() through arg1. */
enum {
	THRESHOLD, QCOUNT,
	ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
	LATENCY, LATENCY_MIN, LATENCY_MAX
};
extern uint64_t	timer_sysctl_get(int);
extern int	timer_sysctl_set(int, uint64_t);
2094
2095STATIC int
2096sysctl_timer
2097(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2098{
2099 int oid = (int)arg1;
2100 uint64_t value = timer_sysctl_get(oid);
2101 uint64_t new_value;
2102 int error;
2103 int changed;
2104
2105 error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
2106 if (changed)
2107 error = timer_sysctl_set(oid, new_value);
2108
2109 return error;
2110}
2111
/*
 * kern.timer.longterm.* nodes, all served by sysctl_timer() with the
 * subcommand in arg1.  threshold is tunable; the statistics nodes are
 * read-only and compiled in only on DEBUG kernels.
 */
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
		CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
		(void *) THRESHOLD, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) QCOUNT, 0, sysctl_timer, "Q", "");
#if  DEBUG
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) ENQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) DEQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) ESCALATES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) SCANS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) PREEMPTS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) LATENCY, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
#endif /* DEBUG */
2144
/*
 * kern.usrstack: the calling process's user stack address truncated to
 * 32 bits -- a deliberate legacy view; 64-bit callers should use
 * kern.usrstack64 (KERN_USRSTACK64) below.
 */
STATIC int
sysctl_usrstack
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
	    0, 0, sysctl_usrstack, "I", "");
2155
/*
 * kern.usrstack64: full-width user stack address of the calling process.
 */
STATIC int
sysctl_usrstack64
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
	    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
	    0, 0, sysctl_usrstack64, "Q", "");
2166
2167SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
2168 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2169 corefilename, sizeof(corefilename), "");
2170
2171STATIC int
2172sysctl_coredump
2173(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2174{
2175#ifdef SECURE_KERNEL
2176 (void)req;
2177 return (ENOTSUP);
2178#else
2179 int new_value, changed;
2180 int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
2181 if (changed) {
2182 if ((new_value == 0) || (new_value == 1))
2183 do_coredump = new_value;
2184 else
2185 error = EINVAL;
2186 }
2187 return(error);
2188#endif
2189}
2190
2191SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
2192 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2193 0, 0, sysctl_coredump, "I", "");
2194
2195STATIC int
2196sysctl_suid_coredump
2197(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2198{
2199#ifdef SECURE_KERNEL
2200 (void)req;
2201 return (ENOTSUP);
2202#else
2203 int new_value, changed;
2204 int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
2205 if (changed) {
2206 if ((new_value == 0) || (new_value == 1))
2207 sugid_coredump = new_value;
2208 else
2209 error = EINVAL;
2210 }
2211 return(error);
2212#endif
2213}
2214
2215SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
2216 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2217 0, 0, sysctl_suid_coredump, "I", "");
2218
2219STATIC int
2220sysctl_delayterm
2221(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2222{
2223 struct proc *p = req->p;
2224 int new_value, changed;
2225 int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
2226 if (changed) {
2227 proc_lock(p);
2228 if (new_value)
2229 req->p->p_lflag |= P_LDELAYTERM;
2230 else
2231 req->p->p_lflag &= ~P_LDELAYTERM;
2232 proc_unlock(p);
2233 }
2234 return(error);
2235}
2236
2237SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
2238 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2239 0, 0, sysctl_delayterm, "I", "");
2240
2241
2242STATIC int
2243sysctl_rage_vnode
2244(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2245{
2246 struct proc *p = req->p;
2247 struct uthread *ut;
2248 int new_value, old_value, changed;
2249 int error;
2250
2251 ut = get_bsdthread_info(current_thread());
2252
2253 if (ut->uu_flag & UT_RAGE_VNODES)
2254 old_value = KERN_RAGE_THREAD;
2255 else if (p->p_lflag & P_LRAGE_VNODES)
2256 old_value = KERN_RAGE_PROC;
2257 else
2258 old_value = 0;
2259
2260 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2261
2262 if (error == 0) {
2263 switch (new_value) {
2264 case KERN_RAGE_PROC:
2265 proc_lock(p);
2266 p->p_lflag |= P_LRAGE_VNODES;
2267 proc_unlock(p);
2268 break;
2269 case KERN_UNRAGE_PROC:
2270 proc_lock(p);
2271 p->p_lflag &= ~P_LRAGE_VNODES;
2272 proc_unlock(p);
2273 break;
2274
2275 case KERN_RAGE_THREAD:
2276 ut->uu_flag |= UT_RAGE_VNODES;
2277 break;
2278 case KERN_UNRAGE_THREAD:
2279 ut = get_bsdthread_info(current_thread());
2280 ut->uu_flag &= ~UT_RAGE_VNODES;
2281 break;
2282 }
2283 }
2284 return(error);
2285}
2286
2287SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
2288 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2289 0, 0, sysctl_rage_vnode, "I", "");
2290
/* XXX move this interface into libproc and remove this sysctl */
/*
 * kern.setthread_cpupercent: write-only control that applies a CPU-usage
 * limit to the calling thread.  The 32-bit value packs the percentage in
 * the low 8 bits and the refill period (milliseconds) in the upper 24;
 * a percentage of 0 clears any existing limit.
 */
STATIC int
sysctl_setthread_cpupercent
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, old_value;
	int error = 0;
	kern_return_t kret = KERN_SUCCESS;
	uint8_t percent = 0;
	int ms_refill = 0;

	/* Write-only: a pure read has nothing to report. */
	if (!req->newptr)
		return (0);

	old_value = 0;

	if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0)
		return (error);

	percent = new_value & 0xff;			/* low 8 bits hold the percentage */
	ms_refill = (new_value >> 8) & 0xffffff;	/* upper 24 bits hold the ms refill value */
	if (percent > 100)
		return (EINVAL);

	/*
	 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
	 */
	if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0)
		return (EIO);

	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
	    0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
2327
2328
2329STATIC int
2330sysctl_kern_check_openevt
2331(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2332{
2333 struct proc *p = req->p;
2334 int new_value, old_value, changed;
2335 int error;
2336
2337 if (p->p_flag & P_CHECKOPENEVT) {
2338 old_value = KERN_OPENEVT_PROC;
2339 } else {
2340 old_value = 0;
2341 }
2342
2343 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2344
2345 if (error == 0) {
2346 switch (new_value) {
2347 case KERN_OPENEVT_PROC:
2348 OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
2349 break;
2350
2351 case KERN_UNOPENEVT_PROC:
2352 OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
2353 break;
2354
2355 default:
2356 error = EINVAL;
2357 }
2358 }
2359 return(error);
2360}
2361
2362SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2363 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2364
2365
2366
/*
 * kern.nx: boolean controlling no-execute (NX/XD) page protection.
 * Read-only effect on SECURE_KERNEL builds (ENOTSUP); on x86 a write is
 * additionally refused when the CPU does not advertise the XD feature.
 */
STATIC int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	(void)req;
	return ENOTSUP;
#else
	int new_value, changed;
	int error;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
	if (error)
		return error;

	if (changed) {
#if defined(__i386__) || defined(__x86_64__)
		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
			return ENOTSUP;
#endif
		nx_enabled = new_value;
	}
	return(error);
#endif /* SECURE_KERNEL */
}



SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	    0, 0, sysctl_nx, "I", "");
2401
2402STATIC int
2403sysctl_loadavg
2404(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2405{
2406 if (proc_is64bit(req->p)) {
2407 struct user64_loadavg loadinfo64;
2408 fill_loadavg64(&averunnable, &loadinfo64);
2409 return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
2410 } else {
2411 struct user32_loadavg loadinfo32;
2412 fill_loadavg32(&averunnable, &loadinfo32);
2413 return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
2414 }
2415}
2416
2417SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
2418 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2419 0, 0, sysctl_loadavg, "S,loadavg", "");
2420
2421/*
2422 * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
2423 */
2424STATIC int
2425sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
2426 __unused int arg2, struct sysctl_req *req)
2427{
2428 int old_value=0, new_value=0, error=0;
2429
2430 if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value ))
2431 return(error);
2432 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
2433 if (!error) {
2434 return (vm_toggle_entry_reuse(new_value, NULL));
2435 }
2436 return(error);
2437}
2438
2439SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse,"I","");
2440
2441STATIC int
2442sysctl_swapusage
2443(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2444{
2445 int error;
2446 uint64_t swap_total;
2447 uint64_t swap_avail;
2448 vm_size_t swap_pagesize;
2449 boolean_t swap_encrypted;
2450 struct xsw_usage xsu;
2451
2452 error = macx_swapinfo(&swap_total,
2453 &swap_avail,
2454 &swap_pagesize,
2455 &swap_encrypted);
2456 if (error)
2457 return error;
2458
2459 xsu.xsu_total = swap_total;
2460 xsu.xsu_avail = swap_avail;
2461 xsu.xsu_used = swap_total - swap_avail;
2462 xsu.xsu_pagesize = swap_pagesize;
2463 xsu.xsu_encrypted = swap_encrypted;
2464 return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
2465}
2466
2467
2468
2469SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
2470 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2471 0, 0, sysctl_swapusage, "S,xsw_usage", "");
2472
#if CONFIG_FREEZE
extern void vm_page_reactivate_all_throttled(void);

/*
 * vm.freeze_enabled: toggle the memorystatus freezer.  The write is
 * rejected when a compressed pager is active.  When the freezer is
 * being turned off, pages on the throttled queue are pushed back to
 * the active queue so they are not stranded there.
 */
static int
sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = memorystatus_freeze_enabled ? 1 : 0;
	boolean_t disabled;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
		//assert(req->newptr);
		printf("Failed attempt to set vm.freeze_enabled sysctl\n");
		return EINVAL;
	}

	/*
	 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
	 */
	disabled = (!val && memorystatus_freeze_enabled);

	memorystatus_freeze_enabled = val ? TRUE : FALSE;

	if (disabled) {
		vm_page_reactivate_all_throttled();
	}

	return (0);
}

SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
#endif /* CONFIG_FREEZE */
2509
/* this kernel does NOT implement shared_region_make_private_np() */
/* Backed by no variable (NULL pointer), so it reads as the constant 0. */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
	   CTLFLAG_RD | CTLFLAG_LOCKED,
	   (int *)NULL, 0, "");
2514
2515STATIC int
2516fetch_process_cputype(
2517 proc_t cur_proc,
2518 int *name,
2519 u_int namelen,
2520 cpu_type_t *cputype)
2521{
2522 proc_t p = PROC_NULL;
2523 int refheld = 0;
2524 cpu_type_t ret = 0;
2525 int error = 0;
2526
2527 if (namelen == 0)
2528 p = cur_proc;
2529 else if (namelen == 1) {
2530 p = proc_find(name[0]);
2531 if (p == NULL)
2532 return (EINVAL);
2533 refheld = 1;
2534 } else {
2535 error = EINVAL;
2536 goto out;
2537 }
2538
2539 ret = cpu_type() & ~CPU_ARCH_MASK;
2540 if (IS_64BIT_PROCESS(p))
2541 ret |= CPU_ARCH_ABI64;
2542
2543 *cputype = ret;
2544
2545 if (refheld != 0)
2546 proc_rele(p);
2547out:
2548 return (error);
2549}
2550
2551STATIC int
2552sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2553 struct sysctl_req *req)
2554{
2555 int error;
2556 cpu_type_t proc_cputype = 0;
2557 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2558 return error;
2559 int res = 1;
2560 if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
2561 res = 0;
2562 return SYSCTL_OUT(req, &res, sizeof(res));
2563}
2564SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native ,"I","proc_native");
2565
2566STATIC int
2567sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2568 struct sysctl_req *req)
2569{
2570 int error;
2571 cpu_type_t proc_cputype = 0;
2572 if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2573 return error;
2574 return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
2575}
2576SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
2577
2578STATIC int
2579sysctl_safeboot
2580(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2581{
2582 return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
2583}
2584
2585SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
2586 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2587 0, 0, sysctl_safeboot, "I", "");
2588
2589STATIC int
2590sysctl_singleuser
2591(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2592{
2593 return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
2594}
2595
2596SYSCTL_PROC(_kern, OID_AUTO, singleuser,
2597 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2598 0, 0, sysctl_singleuser, "I", "");
2599
2600/*
2601 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
2602 */
2603extern boolean_t affinity_sets_enabled;
2604extern int affinity_sets_mapping;
2605
2606SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
2607 CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
2608SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
2609 CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
2610
2611/*
2612 * Boolean indicating if KASLR is active.
2613 */
2614STATIC int
2615sysctl_slide
2616(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2617{
2618 uint32_t slide;
2619
2620 slide = vm_kernel_slide ? 1 : 0;
2621
2622 return sysctl_io_number( req, slide, sizeof(int), NULL, NULL);
2623}
2624
2625SYSCTL_PROC(_kern, OID_AUTO, slide,
2626 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2627 0, 0, sysctl_slide, "I", "");
2628
2629/*
2630 * Limit on total memory users can wire.
2631 *
2632 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
2633 *
2634 * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value.
2635 *
2636 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
2637 * kmem_init().
2638 *
2639 * All values are in bytes.
2640 */
2641
2642vm_map_size_t vm_global_no_user_wire_amount;
2643vm_map_size_t vm_global_user_wire_limit;
2644vm_map_size_t vm_user_wire_limit;
2645
2646/*
2647 * There needs to be a more automatic/elegant way to do this
2648 */
2649SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
2650SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
2651SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
2652
2653extern int vm_map_copy_overwrite_aligned_src_not_internal;
2654extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
2655extern int vm_map_copy_overwrite_aligned_src_large;
2656SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
2657SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
2658SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");
2659
2660
2661extern uint32_t vm_page_external_count;
2662extern uint32_t vm_page_filecache_min;
2663
2664SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");
2665SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_filecache_min, 0, "");
2666
2667extern int vm_compressor_mode;
2668extern int vm_compressor_is_active;
2669extern uint32_t swapout_target_age;
2670extern int64_t compressor_bytes_used;
2671extern uint32_t compressor_eval_period_in_msecs;
2672extern uint32_t compressor_sample_min_in_msecs;
2673extern uint32_t compressor_sample_max_in_msecs;
2674extern uint32_t compressor_thrashing_threshold_per_10msecs;
2675extern uint32_t compressor_thrashing_min_per_10msecs;
2676extern uint32_t vm_compressor_minorcompact_threshold_divisor;
2677extern uint32_t vm_compressor_majorcompact_threshold_divisor;
2678extern uint32_t vm_compressor_unthrottle_threshold_divisor;
2679extern uint32_t vm_compressor_catchup_threshold_divisor;
2680
2681SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
2682SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
2683SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");
2684SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
2685
2686SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
2687SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
2688SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
2689SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
2690SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");
2691SYSCTL_INT(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_minorcompact_threshold_divisor, 0, "");
2692SYSCTL_INT(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_majorcompact_threshold_divisor, 0, "");
2693SYSCTL_INT(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_unthrottle_threshold_divisor, 0, "");
2694SYSCTL_INT(_vm, OID_AUTO, compressor_catchup_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_catchup_threshold_divisor, 0, "");
2695
2696SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");
2697
2698#if CONFIG_PHANTOM_CACHE
2699extern uint32_t phantom_cache_thrashing_threshold;
2700extern uint32_t phantom_cache_eval_period_in_msecs;
2701extern uint32_t phantom_cache_thrashing_threshold_ssd;
2702
2703
2704SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
2705SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
2706SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
2707#endif
2708
2709#if (DEVELOPMENT || DEBUG)
2710
2711SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
2712 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2713 &vm_page_creation_throttled_hard, 0, "");
2714
2715SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
2716 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2717 &vm_page_creation_throttled_soft, 0, "");
2718
2719#endif /* DEVELOPMENT || DEBUG */
2720
2721/*
2722 * Enable tracing of voucher contents
2723 */
2724extern uint32_t ipc_voucher_trace_contents;
2725
2726SYSCTL_INT (_kern, OID_AUTO, ipc_voucher_trace_contents,
2727 CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");
2728
2729/*
2730 * Kernel stack size and depth
2731 */
2732SYSCTL_INT (_kern, OID_AUTO, stack_size,
2733 CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
2734SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
2735 CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
2736
2737/*
2738 * enable back trace for port allocations
2739 */
2740extern int ipc_portbt;
2741
2742SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
2743 CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2744 &ipc_portbt, 0, "");
2745
2746/*
2747 * Scheduler sysctls
2748 */
2749
2750/*
2751 * See osfmk/kern/sched_prim.c for the corresponding definition
2752 * in osfmk/. If either version changes, update the other.
2753 */
2754#define SCHED_STRING_MAX_LENGTH (48)
2755
2756extern char sched_string[SCHED_STRING_MAX_LENGTH];
2757SYSCTL_STRING(_kern, OID_AUTO, sched,
2758 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2759 sched_string, sizeof(sched_string),
2760 "Timeshare scheduler implementation");
2761
2762/*
2763 * Only support runtime modification on embedded platforms
2764 * with development config enabled
2765 */
2766
2767
2768/* Parameters related to timer coalescing tuning, to be replaced
2769 * with a dedicated systemcall in the future.
2770 */
/*
 * Enable processing of pending timers in the context of any other interrupt.
 * Coalescing tuning parameters for the various thread/task attributes follow.
 */
/*
 * Generic handler exposing a kernel abstime tunable (pointed to by arg1)
 * to userspace in nanoseconds.  arg2 carries the byte width of the
 * backing variable so the same handler serves uint32_t- and
 * uint64_t-backed tunables.
 */
STATIC int
sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	int size = arg2;	/* byte width of the backing variable */
	int error;
	int changed = 0;
	uint64_t old_value_ns;
	uint64_t new_value_ns;
	uint64_t value_abstime;
	if (size == sizeof(uint32_t))
		value_abstime = *((uint32_t *)arg1);
	else if (size == sizeof(uint64_t))
		value_abstime = *((uint64_t *)arg1);
	else return ENOTSUP;

	/* Report (and accept) the value in nanoseconds, not abstime units. */
	absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
	error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
	if ((error) || (!changed))
		return error;

	/* Convert back to abstime and store at the caller-declared width. */
	nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
	if (size == sizeof(uint32_t))
		*((uint32_t *)arg1) = (uint32_t)value_abstime;
	else
		*((uint64_t *)arg1) = value_abstime;
	return error;
}
2801
2802SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
2803 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2804 &tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
2805SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
2806 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2807 &tcoal_prio_params.timer_resort_threshold_abstime,
2808 sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
2809 sysctl_timer_user_us_kernel_abstime,
2810 "Q", "");
2811SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
2812 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2813 &tcoal_prio_params.timer_coalesce_bg_abstime_max,
2814 sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
2815 sysctl_timer_user_us_kernel_abstime,
2816 "Q", "");
2817
2818SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
2819 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2820 &tcoal_prio_params.timer_coalesce_kt_shift, 0, "");
2821
2822SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
2823 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2824 &tcoal_prio_params.timer_coalesce_kt_abstime_max,
2825 sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
2826 sysctl_timer_user_us_kernel_abstime,
2827 "Q", "");
2828
2829SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
2830 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2831 &tcoal_prio_params.timer_coalesce_fp_shift, 0, "");
2832
2833SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
2834 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2835 &tcoal_prio_params.timer_coalesce_fp_abstime_max,
2836 sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
2837 sysctl_timer_user_us_kernel_abstime,
2838 "Q", "");
2839
2840SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
2841 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2842 &tcoal_prio_params.timer_coalesce_ts_shift, 0, "");
2843
2844SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
2845 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2846 &tcoal_prio_params.timer_coalesce_ts_abstime_max,
2847 sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
2848 sysctl_timer_user_us_kernel_abstime,
2849 "Q", "");
2850
2851SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
2852 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2853 &tcoal_prio_params.latency_qos_scale[0], 0, "");
2854
2855SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
2856 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2857 &tcoal_prio_params.latency_qos_abstime_max[0],
2858 sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
2859 sysctl_timer_user_us_kernel_abstime,
2860 "Q", "");
2861
2862SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
2863 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2864 &tcoal_prio_params.latency_qos_scale[1], 0, "");
2865
2866SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
2867 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2868 &tcoal_prio_params.latency_qos_abstime_max[1],
2869 sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
2870 sysctl_timer_user_us_kernel_abstime,
2871 "Q", "");
2872
2873SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
2874 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2875 &tcoal_prio_params.latency_qos_scale[2], 0, "");
2876
2877SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
2878 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2879 &tcoal_prio_params.latency_qos_abstime_max[2],
2880 sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
2881 sysctl_timer_user_us_kernel_abstime,
2882 "Q", "");
2883
2884SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
2885 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2886 &tcoal_prio_params.latency_qos_scale[3], 0, "");
2887
2888SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
2889 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2890 &tcoal_prio_params.latency_qos_abstime_max[3],
2891 sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
2892 sysctl_timer_user_us_kernel_abstime,
2893 "Q", "");
2894
2895SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
2896 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2897 &tcoal_prio_params.latency_qos_scale[4], 0, "");
2898
2899SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
2900 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2901 &tcoal_prio_params.latency_qos_abstime_max[4],
2902 sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
2903 sysctl_timer_user_us_kernel_abstime,
2904 "Q", "");
2905
2906SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
2907 CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2908 &tcoal_prio_params.latency_qos_scale[5], 0, "");
2909
2910SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
2911 CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
2912 &tcoal_prio_params.latency_qos_abstime_max[5],
2913 sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
2914 sysctl_timer_user_us_kernel_abstime,
2915 "Q", "");
2916
2917/* Communicate the "user idle level" heuristic to the timer layer, and
2918 * potentially other layers in the future.
2919 */
2920
2921static int
2922timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
2923 int new_value = 0, old_value = 0, changed = 0, error;
2924
2925 old_value = timer_get_user_idle_level();
2926
2927 error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2928
2929 if (error == 0 && changed) {
2930 if (timer_set_user_idle_level(new_value) != KERN_SUCCESS)
2931 error = ERANGE;
2932 }
2933
2934 return error;
2935}
2936
2937SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level,
2938 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2939 0, 0,
2940 timer_user_idle_level, "I", "User idle level heuristic, 0-128");
2941
#if HYPERVISOR
/* kern.hv_support: non-zero when Hypervisor.framework support is available. */
SYSCTL_INT(_kern, OID_AUTO, hv_support,
		CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
		&hv_support_available, 0, "");
#endif