#include <sys/user.h>
#include <sys/aio_kern.h>
#include <sys/reboot.h>
+#include <sys/memory_maintenance.h>
+#include <sys/priv.h>
+#include <stdatomic.h>
#include <security/audit/audit.h>
#include <kern/kalloc.h>
+#include <machine/smp.h>
#include <mach/machine.h>
#include <mach/mach_host.h>
#include <mach/mach_types.h>
+#include <mach/processor_info.h>
#include <mach/vm_param.h>
+#include <kern/debug.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
+#include <kern/thread_group.h>
#include <kern/processor.h>
+#include <kern/cpu_number.h>
#include <kern/debug.h>
+#include <kern/sched_prim.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/host_info.h>
#include <vm/vm_protos.h>
#include <vm/vm_pageout.h>
+#include <vm/vm_compressor_algorithms.h>
#include <sys/imgsrc.h>
#include <kern/timer_call.h>
extern int lowpri_IO_delay_msecs;
extern int nx_enabled;
extern int speculative_reads_disabled;
-extern int ignore_is_ssd;
extern unsigned int speculative_prefetch_max;
extern unsigned int speculative_prefetch_max_iosize;
extern unsigned int preheat_max_bytes;
extern unsigned int vm_page_speculative_percentage;
extern unsigned int vm_page_speculative_q_age_ms;
+#if (DEVELOPMENT || DEBUG)
+extern uint32_t vm_page_creation_throttled_hard;
+extern uint32_t vm_page_creation_throttled_soft;
+#endif /* DEVELOPMENT || DEBUG */
+
/*
* Conditionally allow dtrace to see these functions for debugging purposes.
*/
int
pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
proc_t p);
-__private_extern__ kern_return_t
-reset_vmobjectcache(unsigned int val1, unsigned int val2);
int
sysctl_procargs(int *name, u_int namelen, user_addr_t where,
size_t *sizep, proc_t cur_proc);
STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
-#if CONFIG_LCTX
-STATIC int sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg);
-#endif
int sysdoproc_callback(proc_t p, void *arg);
#if COUNT_SYSCALLS
STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
#endif /* COUNT_SYSCALLS */
+#if !CONFIG_EMBEDDED
STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
+#endif /* !CONFIG_EMBEDDED */
STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#endif
STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
+#if CONFIG_COREDUMP
STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
+#endif
STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
+STATIC int sysctl_minimalboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
la32->fscale = (user32_long_t)la->fscale;
}
+#if CONFIG_COREDUMP
/*
* Attributes stored in the kernel.
*/
extern char corefilename[MAXPATHLEN+1];
extern int do_coredump;
extern int sugid_coredump;
+#endif
#if COUNT_SYSCALLS
extern int do_count_syscalls;
ut->pth_name = (char*)kalloc( MAXTHREADNAMESIZE );
if(!ut->pth_name)
return ENOMEM;
+ } else {
+ kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, ut->pth_name);
}
bzero(ut->pth_name, MAXTHREADNAMESIZE);
error = copyin(newp, ut->pth_name, newlen);
- if(error)
+ if (error) {
return error;
+ }
+
+ kernel_debug_string_simple(TRACE_STRING_THREADNAME, ut->pth_name);
}
return 0;
SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
+extern uint32_t sched_debug_flags;
+SYSCTL_INT(_debug, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_debug_flags, 0, "scheduler debug");
+
+#if (DEBUG || DEVELOPMENT)
+extern boolean_t doprnt_hide_pointers;
+SYSCTL_INT(_debug, OID_AUTO, hide_kernel_pointers, CTLFLAG_RW | CTLFLAG_LOCKED, &doprnt_hide_pointers, 0, "hide kernel pointers from log");
+#endif
+
extern int get_kernel_symfile(proc_t, char **);
#if COUNT_SYSCALLS
#define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
-extern int nsysent;
+extern unsigned int nsysent;
extern int syscalls_log[];
extern const char *syscallnames[];
return(1);
}
-#if CONFIG_LCTX
-STATIC int
-sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg)
-{
- if ((p->p_lctx == NULL) ||
- (p->p_lctx->lc_id != (pid_t)*(int*)arg))
- return(0);
- else
- return(1);
-}
-#endif
-
/*
* try over estimating by 5 procs
*/
int uidcheck = 0;
int ruidcheck = 0;
int ttycheck = 0;
+ int success = 0;
if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL))
return (EINVAL);
ruidcheck = 1;
break;
-#if CONFIG_LCTX
- case KERN_PROC_LCID:
- filterfn = sysdoproc_filt_KERN_PROC_LCID;
- break;
-#endif
case KERN_PROC_ALL:
break;
if (namelen)
args.uidval = name[0];
- proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
- sysdoproc_callback, &args, filterfn, name);
+ success = proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
+ sysdoproc_callback, &args, filterfn, name);
+
+ /*
+ * rdar://problem/28433391: if we can't iterate over the processes,
+ * make sure to return an error.
+ */
+
+ if (success != 0)
+ return (ENOMEM);
if (error)
return (error);
if (sessp != SESSION_NULL && sessp->s_ttyvp)
ep->e_flag = EPROC_CTTY;
}
-#if CONFIG_LCTX
- if (p->p_lctx)
- ep->e_lcid = p->p_lctx->lc_id;
-#endif
ep->e_ppid = p->p_ppid;
if (p->p_ucred) {
my_cred = kauth_cred_proc_ref(p);
if (sessp != SESSION_NULL && sessp->s_ttyvp)
ep->e_flag = EPROC_CTTY;
}
-#if CONFIG_LCTX
- if (p->p_lctx)
- ep->e_lcid = p->p_lctx->lc_id;
-#endif
ep->e_ppid = p->p_ppid;
if (p->p_ucred) {
my_cred = kauth_cred_proc_ref(p);
// user_addr_t newp = req->newptr; /* user buffer copy in address */
// size_t newlen = req->newlen; /* user buffer copy in size */
- proc_t p = current_proc();
int ret=0;
if (namelen == 0)
return(ENOTSUP);
-
- ret = suser(kauth_cred_get(), &p->p_acflag);
-#if KPERF
- /* Non-root processes may be blessed by kperf to access data
- * logged into trace.
- */
- if (ret)
- ret = kperf_access_check();
-#endif /* KPERF */
- if (ret)
- return(ret);
-
+
switch(name[0]) {
case KERN_KDEFLAGS:
case KERN_KDDFLAGS:
case KERN_KDSETREG:
case KERN_KDGETREG:
case KERN_KDREADTR:
- case KERN_KDWRITETR:
- case KERN_KDWRITEMAP:
+ case KERN_KDWRITETR:
+ case KERN_KDWRITEMAP:
+ case KERN_KDTEST:
case KERN_KDPIDTR:
case KERN_KDTHRMAP:
case KERN_KDPIDEX:
- case KERN_KDSETRTCDEC:
case KERN_KDSETBUF:
- case KERN_KDGETENTROPY:
- case KERN_KDENABLE_BG_TRACE:
- case KERN_KDDISABLE_BG_TRACE:
case KERN_KDREADCURTHRMAP:
case KERN_KDSET_TYPEFILTER:
- case KERN_KDBUFWAIT:
+ case KERN_KDBUFWAIT:
case KERN_KDCPUMAP:
-
- ret = kdbg_control(name, namelen, oldp, oldlenp);
- break;
+ case KERN_KDWRITEMAP_V3:
+ case KERN_KDWRITETR_V3:
+ ret = kdbg_control(name, namelen, oldp, oldlenp);
+ break;
default:
ret= ENOTSUP;
break;
"");
+#if !CONFIG_EMBEDDED
/*
* Return the top *sizep bytes of the user stack, or the entire area of the
* user stack down through the saved exec_path, whichever is smaller.
sysctl_doprocargs, /* Handler function */
NULL, /* Data pointer */
"");
+#endif /* !CONFIG_EMBEDDED */
STATIC int
sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
int pid;
kauth_cred_t my_cred;
uid_t uid;
+ int argc = -1;
if ( namelen < 1 )
return(EINVAL);
proc_rele(p);
return(EFAULT);
}
-
- size = p->p_argslen;
+
+ size = p->p_argslen;
proc_rele(p);
- if (argc_yes) {
- size += sizeof(int);
- }
- else {
+ if (argc_yes) {
+ size += sizeof(int);
+ } else {
/*
* old PROCARGS will return the executable's path and plus some
* extra space for work alignment and data tags
*/
- size += PATH_MAX + (6 * sizeof(int));
- }
+ size += PATH_MAX + (6 * sizeof(int));
+ }
size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
*sizep = size;
return (0);
}
-
+
my_cred = kauth_cred_proc_ref(p);
uid = kauth_cred_getuid(my_cred);
kauth_cred_unref(&my_cred);
arg_addr = p->user_stack - arg_size;
-
/*
* Before we can block (any VM code), make another
* reference to the map to keep it alive. We do
proc_rele(p);
return(EINVAL);
}
-
+
+ /* save off argc before releasing the proc */
+ argc = p->p_argc;
+
argslen = p->p_argslen;
/*
* Once we have a task reference we can convert that into a
return(EINVAL);
- ret = kmem_alloc(kernel_map, ©_start, round_page(arg_size));
+ ret = kmem_alloc(kernel_map, ©_start, round_page(arg_size), VM_KERN_MEMORY_BSD);
if (ret != KERN_SUCCESS) {
vm_map_deallocate(proc_map);
return(ENOMEM);
tmp, FALSE) != KERN_SUCCESS) {
kmem_free(kernel_map, copy_start,
round_page(arg_size));
+ vm_map_copy_discard(tmp);
return (EIO);
}
size = arg_size;
}
+ /*
+ * When these sysctls were introduced, the first string in the strings
+ * section was just the bare path of the executable. However, for security
+ * reasons we now prefix this string with executable_path= so it can be
+ * parsed getenv-style. To avoid binary compatibility issues with existing
+ * callers of this sysctl, we strip it off here if present.
+ * (rdar://problem/13746466)
+ */
+#define EXECUTABLE_KEY "executable_path="
+ if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0) {
+ data += strlen(EXECUTABLE_KEY);
+ size -= strlen(EXECUTABLE_KEY);
+ }
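+ /*
+ * Illustrative sketch only (not part of this change): after the prefix is
+ * stripped, a userspace consumer of KERN_PROCARGS2 sees the original layout,
+ * argc followed by the bare executable path. "pid" and "buf" are assumed:
+ *
+ *     int mib[3] = { CTL_KERN, KERN_PROCARGS2, pid };
+ *     size_t len = sizeof(buf);
+ *     if (sysctl(mib, 3, buf, &len, NULL, 0) == 0) {
+ *         int argc;
+ *         memcpy(&argc, buf, sizeof(argc));
+ *         const char *exec_path = buf + sizeof(int);
+ *     }
+ */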
+
if (argc_yes) {
/* Put processes argc as the first word in the copyout buffer */
- suword(where, p->p_argc);
+ suword(where, argc);
error = copyout(data, (where + sizeof(int)), size);
size += sizeof(int);
} else {
SYSCTL_STRING(_kern, OID_AUTO, uuid,
CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
&kernel_uuid_string[0], 0, "");
-
#if DEBUG
+#ifndef DKPR
+#define DKPR 1
+#endif
+#endif
+
+#if DKPR
int debug_kprint_syscall = 0;
char debug_kprint_syscall_process[MAXCOMLEN+1];
osversion, 256 /* OSVERSIZE*/,
sysctl_osversion, "A", "");
+static uint64_t osvariant_status = 0;
+
+STATIC int
+sysctl_osvariant_status(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
+{
+ if (req->newptr != 0) {
+ /*
+ * Can only ever be set by launchd, and only once at boot.
+ */
+ if (req->p->p_pid != 1 || osvariant_status != 0) {
+ return EPERM;
+ }
+ }
+
+ return sysctl_handle_quad(oidp, arg1, arg2, req);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, osvariant_status,
+ CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
+ &osvariant_status, sizeof(osvariant_status),
+ sysctl_osvariant_status, "Q", "Opaque flags used to cache OS variant information");
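+
+/*
+ * Usage sketch (assumption, not part of this change): any process can read
+ * the cached flags by name, even though CTLFLAG_MASKED hides the OID from
+ * sysctl listings; only pid 1 may write, and only while the value is zero.
+ *
+ *     uint64_t status = 0;
+ *     size_t len = sizeof(status);
+ *     if (sysctlbyname("kern.osvariant_status", &status, &len, NULL, 0) == 0)
+ *         printf("osvariant_status = 0x%llx\n", status);
+ */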
+
STATIC int
sysctl_sysctl_bootargs
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
int error;
- char buf[256];
+ /* BOOT_LINE_LENGTH */
+#if CONFIG_EMBEDDED
+ size_t boot_args_len = 256;
+#else
+ size_t boot_args_len = 1024;
+#endif
+ char buf[boot_args_len];
- strlcpy(buf, PE_boot_args(), 256);
- error = sysctl_io_string(req, buf, 256, 0, NULL);
+ strlcpy(buf, PE_boot_args(), boot_args_len);
+ error = sysctl_io_string(req, buf, boot_args_len, 0, NULL);
return(error);
}
int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
if (oldval != desiredvnodes) {
- reset_vmobjectcache(oldval, desiredvnodes);
resize_namecache(desiredvnodes);
}
SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance,
CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
&sched_smt_balance, 0, "");
-#endif
+#if __arm__ || __arm64__
+extern uint32_t perfcontrol_requested_recommended_cores;
+SYSCTL_UINT(_kern, OID_AUTO, sched_recommended_cores,
+ CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
+ &perfcontrol_requested_recommended_cores, 0, "");
+
+/* Scheduler perfcontrol callouts sysctls */
+SYSCTL_DECL(_kern_perfcontrol_callout);
+SYSCTL_NODE(_kern, OID_AUTO, perfcontrol_callout, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
+ "scheduler perfcontrol callouts");
+
+extern int perfcontrol_callout_stats_enabled;
+SYSCTL_INT(_kern_perfcontrol_callout, OID_AUTO, stats_enabled,
+ CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
+ &perfcontrol_callout_stats_enabled, 0, "");
+
+extern uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
+ perfcontrol_callout_stat_t stat);
+
+/* On-Core Callout */
+STATIC int
+sysctl_perfcontrol_callout_stat
+(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
+{
+ perfcontrol_callout_stat_t stat = (perfcontrol_callout_stat_t)arg1;
+ perfcontrol_callout_type_t type = (perfcontrol_callout_type_t)arg2;
+ return sysctl_io_number(req, (int)perfcontrol_callout_stat_avg(type, stat),
+ sizeof(int), NULL, NULL);
+}
+
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_instr,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_ON_CORE,
+ sysctl_perfcontrol_callout_stat, "I", "");
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_cycles,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_ON_CORE,
+ sysctl_perfcontrol_callout_stat, "I", "");
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_instr,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_OFF_CORE,
+ sysctl_perfcontrol_callout_stat, "I", "");
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_cycles,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_OFF_CORE,
+ sysctl_perfcontrol_callout_stat, "I", "");
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_instr,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_CONTEXT,
+ sysctl_perfcontrol_callout_stat, "I", "");
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_cycles,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_CONTEXT,
+ sysctl_perfcontrol_callout_stat, "I", "");
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_instr,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_STATE_UPDATE,
+ sysctl_perfcontrol_callout_stat, "I", "");
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_cycles,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_STATE_UPDATE,
+ sysctl_perfcontrol_callout_stat, "I", "");
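+
+/*
+ * Usage sketch (assumption): with kern.perfcontrol_callout.stats_enabled set
+ * to 1, each OID above reports the average for its (type, stat) pair, e.g.:
+ *
+ *     sysctl kern.perfcontrol_callout.oncore_cycles
+ */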
+
+#endif /* __arm__ || __arm64__ */
+#endif /* (DEVELOPMENT || DEBUG) */
STATIC int
sysctl_securelvl
CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
&speculative_reads_disabled, 0, "");
-SYSCTL_INT(_kern, OID_AUTO, ignore_is_ssd,
- CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
- &ignore_is_ssd, 0, "");
-
SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
&preheat_max_bytes, 0, "");
sysctl_boottime
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
- time_t tv_sec = boottime_sec();
+ struct timeval tv;
+ boottime_timeval(&tv);
struct proc *p = req->p;
if (proc_is64bit(p)) {
- struct user64_timeval t;
- t.tv_sec = tv_sec;
- t.tv_usec = 0;
+ struct user64_timeval t = {};
+ t.tv_sec = tv.tv_sec;
+ t.tv_usec = tv.tv_usec;
return sysctl_io_opaque(req, &t, sizeof(t), NULL);
} else {
- struct user32_timeval t;
- t.tv_sec = tv_sec;
- t.tv_usec = 0;
+ struct user32_timeval t = {};
+ t.tv_sec = tv.tv_sec;
+ t.tv_usec = tv.tv_usec;
return sysctl_io_opaque(req, &t, sizeof(t), NULL);
}
}
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
int error;
- struct imgsrc_info info[MAX_IMAGEBOOT_NESTING]; /* 2 for now, no problem */
+ struct imgsrc_info info[MAX_IMAGEBOOT_NESTING] = {}; /* 2 for now, no problem */
uint32_t i;
vnode_t rvp, devvp;
enum {
THRESHOLD, QCOUNT,
ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
- LATENCY, LATENCY_MIN, LATENCY_MAX
+ LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, PAUSES
};
extern uint64_t timer_sysctl_get(int);
extern int timer_sysctl_set(int, uint64_t);
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
(void *) THRESHOLD, 0, sysctl_timer, "Q", "");
+SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_limit,
+ CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ (void *) SCAN_LIMIT, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
(void *) QCOUNT, 0, sysctl_timer, "Q", "");
-#if DEBUG
+#if DEBUG
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
(void *) ENQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
(void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
+SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_pauses,
+ CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+ (void *) PAUSES, 0, sysctl_timer, "Q", "");
#endif /* DEBUG */
STATIC int
CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
0, 0, sysctl_usrstack64, "Q", "");
+#if CONFIG_COREDUMP
+
SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
corefilename, sizeof(corefilename), "");
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
0, 0, sysctl_suid_coredump, "I", "");
+#endif /* CONFIG_COREDUMP */
+
STATIC int
sysctl_delayterm
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
if (proc_is64bit(req->p)) {
- struct user64_loadavg loadinfo64;
+ struct user64_loadavg loadinfo64 = {};
fill_loadavg64(&averunnable, &loadinfo64);
return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
} else {
- struct user32_loadavg loadinfo32;
+ struct user32_loadavg loadinfo32 = {};
fill_loadavg32(&averunnable, &loadinfo32);
return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
}
SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse,"I","");
+
STATIC int
sysctl_swapusage
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
uint64_t swap_avail;
vm_size_t swap_pagesize;
boolean_t swap_encrypted;
- struct xsw_usage xsu;
+ struct xsw_usage xsu = {};
error = macx_swapinfo(&swap_total,
&swap_avail,
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
return (error);
-
- if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+
+ if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
//assert(req->newptr);
printf("Failed attempt to set vm.freeze_enabled sysctl\n");
return EINVAL;
CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
0, 0, sysctl_singleuser, "I", "");
+STATIC int sysctl_minimalboot
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ return sysctl_io_number(req, minimalboot, sizeof(int), NULL, NULL);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, minimalboot,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ 0, 0, sysctl_minimalboot, "I", "");
+
/*
* Controls for debugging affinity sets - see osfmk/kern/affinity.c
*/
/*
* There needs to be a more automatic/elegant way to do this
*/
+#if defined(__arm__)
+SYSCTL_INT(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, 0, "");
+#else
SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
+#endif
extern int vm_map_copy_overwrite_aligned_src_not_internal;
extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_filecache_min, 0, "");
extern int vm_compressor_mode;
+extern int vm_compressor_is_active;
+extern int vm_compressor_available;
+extern uint32_t vm_ripe_target_age;
extern uint32_t swapout_target_age;
extern int64_t compressor_bytes_used;
+extern int64_t c_segment_input_bytes;
+extern int64_t c_segment_compressed_bytes;
extern uint32_t compressor_eval_period_in_msecs;
extern uint32_t compressor_sample_min_in_msecs;
extern uint32_t compressor_sample_max_in_msecs;
extern uint32_t vm_compressor_majorcompact_threshold_divisor;
extern uint32_t vm_compressor_unthrottle_threshold_divisor;
extern uint32_t vm_compressor_catchup_threshold_divisor;
+extern uint32_t vm_compressor_time_thread;
+#if DEVELOPMENT || DEBUG
+extern vmct_stats_t vmct_stats;
+#endif
-SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_input_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_input_bytes, "");
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_compressed_bytes, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");
+
+SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_available, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_available, 0, "");
+
+SYSCTL_INT(_vm, OID_AUTO, vm_ripe_target_age_in_secs, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ripe_target_age, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_timing_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_time_thread, 0, "");
+
+#if DEVELOPMENT || DEBUG
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[0], "");
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[1], "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_threads_total, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_cthreads_total, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[0], "");
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[1], "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[0], "");
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[1], "");
+
+SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[0], 0, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[1], 0, "");
+
+SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[0], 0, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[1], 0, "");
+
+#endif
+
+SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressions, "");
+SYSCTL_QUAD(_vm, OID_AUTO, lz4_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compression_failures, "");
+SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressed_bytes, "");
+SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_delta, "");
+SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_negative_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_negative_delta, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressions, "");
+SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressed_bytes, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, uc_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.uc_decompressions, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wk_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_cabstime, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wkh_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_cabstime, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wkh_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_compressions, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wks_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_cabstime, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wks_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressions, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions_exclusive, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_compressions, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wk_mzv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_mzv_compressions, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wk_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compression_failures, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_exclusive, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_total, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_total, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wks_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressed_bytes, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wks_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compression_failures, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wks_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_sv_compressions, "");
+
+
+SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressions, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wk_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_dabstime, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wkh_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_dabstime, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wkh_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_decompressions, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wks_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_dabstime, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wks_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_decompressions, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressed_bytes, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_decompressions, "");
+
+SYSCTL_INT(_vm, OID_AUTO, lz4_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_threshold, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, wkdm_reeval_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.wkdm_reeval_threshold, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_skips, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_skips, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_run_length, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_run_length, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, lz4_max_preselects, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_preselects, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, lz4_run_preselection_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_preselection_threshold, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, lz4_run_continue_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_continue_bytes, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, lz4_profitable_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_profitable_bytes, 0, "");
+#if DEVELOPMENT || DEBUG
+extern int vm_compressor_current_codec;
+extern int vm_compressor_test_seg_wp;
+extern boolean_t vm_compressor_force_sw_wkdm;
+SYSCTL_INT(_vm, OID_AUTO, compressor_codec, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_current_codec, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_test_wp, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_test_seg_wp, 0, "");
+
+SYSCTL_INT(_vm, OID_AUTO, wksw_force, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_force_sw_wkdm, 0, "");
+extern int precompy, wkswhw;
+
+SYSCTL_INT(_vm, OID_AUTO, precompy, CTLFLAG_RW | CTLFLAG_LOCKED, &precompy, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, wkswhw, CTLFLAG_RW | CTLFLAG_LOCKED, &wkswhw, 0, "");
+extern unsigned int vm_ktrace_enabled;
+SYSCTL_INT(_vm, OID_AUTO, vm_ktrace, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ktrace_enabled, 0, "");
+#endif
+
#if CONFIG_PHANTOM_CACHE
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
#endif
+#if CONFIG_BACKGROUND_QUEUE
+
+extern uint32_t vm_page_background_count;
+extern uint32_t vm_page_background_target;
+extern uint32_t vm_page_background_internal_count;
+extern uint32_t vm_page_background_external_count;
+extern uint32_t vm_page_background_mode;
+extern uint32_t vm_page_background_exclude_external;
+extern uint64_t vm_page_background_promoted_count;
+extern uint64_t vm_pageout_considered_bq_internal;
+extern uint64_t vm_pageout_considered_bq_external;
+extern uint64_t vm_pageout_rejected_bq_internal;
+extern uint64_t vm_pageout_rejected_bq_external;
+
+SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
+SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_internal, "");
+SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_external, "");
+SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
+SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");
+
+#endif
+
+#if (DEVELOPMENT || DEBUG)
+
+SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
+ CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ &vm_page_creation_throttled_hard, 0, "");
+
+SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
+ CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ &vm_page_creation_throttled_soft, 0, "");
+
+extern uint32_t vm_pageout_memorystatus_fb_factor_nr;
+extern uint32_t vm_pageout_memorystatus_fb_factor_dr;
+SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_nr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_nr, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_dr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_dr, 0, "");
+
+extern uint32_t vm_grab_anon_overrides;
+extern uint32_t vm_grab_anon_nops;
+
+SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_overrides, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_nops, 0, "");
+
+/* log message counters for persistence mode */
+extern uint32_t oslog_p_total_msgcount;
+extern uint32_t oslog_p_metadata_saved_msgcount;
+extern uint32_t oslog_p_metadata_dropped_msgcount;
+extern uint32_t oslog_p_error_count;
+extern uint32_t oslog_p_saved_msgcount;
+extern uint32_t oslog_p_dropped_msgcount;
+extern uint32_t oslog_p_boot_dropped_msgcount;
+
+/* log message counters for streaming mode */
+extern uint32_t oslog_s_total_msgcount;
+extern uint32_t oslog_s_metadata_msgcount;
+extern uint32_t oslog_s_error_count;
+extern uint32_t oslog_s_streamed_msgcount;
+extern uint32_t oslog_s_dropped_msgcount;
+
+SYSCTL_UINT(_debug, OID_AUTO, oslog_p_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_total_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_saved_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_dropped_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_p_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_error_count, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_p_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_saved_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_p_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_dropped_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_p_boot_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_boot_dropped_msgcount, 0, "");
+
+SYSCTL_UINT(_debug, OID_AUTO, oslog_s_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_total_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_s_metadata_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_metadata_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_s_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_error_count, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_s_streamed_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_streamed_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_dropped_msgcount, 0, "");
+
+
+#endif /* DEVELOPMENT || DEBUG */
+
/*
* Enable tracing of voucher contents
*/
SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
+extern unsigned int kern_feature_overrides;
+SYSCTL_INT (_kern, OID_AUTO, kern_feature_overrides,
+ CTLFLAG_RD | CTLFLAG_LOCKED, &kern_feature_overrides, 0, "Kernel feature override mask");
+
/*
* enable back trace for port allocations
*/
* Scheduler sysctls
*/
-/*
- * See osfmk/kern/sched_prim.c for the corresponding definition
- * in osfmk/. If either version changes, update the other.
- */
-#define SCHED_STRING_MAX_LENGTH (48)
-
-extern char sched_string[SCHED_STRING_MAX_LENGTH];
SYSCTL_STRING(_kern, OID_AUTO, sched,
CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
sched_string, sizeof(sched_string),
* Only support runtime modification on embedded platforms
* with development config enabled
*/
+#if CONFIG_EMBEDDED
+#if !SECURE_KERNEL
+extern int precise_user_kernel_time;
+SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time,
+ CTLFLAG_RW | CTLFLAG_LOCKED,
+ &precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time");
+#endif
+#endif
/* Parameters related to timer coalescing tuning, to be replaced
CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
&hv_support_available, 0, "");
#endif
+
+#if CONFIG_EMBEDDED
+STATIC int
+sysctl_darkboot SYSCTL_HANDLER_ARGS
+{
+ int err = 0, value = 0;
+#pragma unused(oidp, arg1, arg2, err, value, req)
+
+ /*
+ * Handle the sysctl request.
+ *
+ * For a read, sysctl_io_number() reports the current darkboot value. For a
+ * write, it copies the requested value into "value" so we can honor it below.
+ */
+ if ((err = sysctl_io_number(req, darkboot, sizeof(int), &value, NULL)) != 0) {
+ goto exit;
+ }
+
+ /* writing requested, let's process the request */
+ if (req->newptr) {
+ /* writing is protected by an entitlement */
+ if (priv_check_cred(kauth_cred_get(), PRIV_DARKBOOT, 0) != 0) {
+ err = EPERM;
+ goto exit;
+ }
+
+ switch (value) {
+ case MEMORY_MAINTENANCE_DARK_BOOT_UNSET:
+ /*
+ * If the darkboot sysctl is unset, the NVRAM variable
+ * must be unset too. If that's not the case, it means
+ * someone is doing something unexpected and unsupported.
+ */
+ if (darkboot != 0) {
+ int ret = PERemoveNVRAMProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME);
+ if (ret) {
+ darkboot = 0;
+ } else {
+ err = EINVAL;
+ }
+ }
+ break;
+ case MEMORY_MAINTENANCE_DARK_BOOT_SET:
+ darkboot = 1;
+ break;
+ case MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT: {
+ /*
+ * Set the NVRAM and update 'darkboot' in case
+ * of success. Otherwise, do not update
+ * 'darkboot' and report the failure.
+ */
+ if (PEWriteNVRAMBooleanProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME, TRUE)) {
+ darkboot = 1;
+ } else {
+ err = EINVAL;
+ }
+
+ break;
+ }
+ default:
+ err = EINVAL;
+ }
+ }
+
+exit:
+ return err;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, darkboot,
+ CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
+ 0, 0, sysctl_darkboot, "I", "");
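+
+/*
+ * Usage sketch (assumption; values per sys/memory_maintenance.h): writing 1
+ * (SET) sets the in-kernel flag, 2 (SET_PERSISTENT) also records it in NVRAM,
+ * and 0 (UNSET) clears both. Writes require the PRIV_DARKBOOT entitlement:
+ *
+ *     sysctl kern.darkboot=1
+ */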
+#endif
+
+/*
+ * This is set by Core Audio to tell tailspin (i.e., background tracing) how
+ * long its smallest buffer is. Background tracing can then try to make
+ * reasonable decisions to avoid introducing so much latency that the buffers
+ * underflow.
+ */
+
+int min_audio_buffer_usec;
+
+STATIC int
+sysctl_audio_buffer SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int err = 0, value = 0, changed = 0;
+ err = sysctl_io_number(req, min_audio_buffer_usec, sizeof(int), &value, &changed);
+ if (err) goto exit;
+
+ if (changed) {
+ /* writing is protected by an entitlement */
+ if (priv_check_cred(kauth_cred_get(), PRIV_AUDIO_LATENCY, 0) != 0) {
+ err = EPERM;
+ goto exit;
+ }
+ min_audio_buffer_usec = value;
+ }
+exit:
+ return err;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, min_audio_buffer_usec, CTLFLAG_RW | CTLFLAG_ANYBODY, 0, 0, sysctl_audio_buffer, "I", "Minimum audio buffer size, in microseconds");
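+
+/*
+ * Usage sketch (assumption): an audio daemon holding the PRIV_AUDIO_LATENCY
+ * entitlement would publish its smallest buffer like so; "usec" is assumed.
+ *
+ *     int usec = 2500;
+ *     sysctlbyname("kern.min_audio_buffer_usec", NULL, NULL, &usec, sizeof(usec));
+ */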
+
+#if DEVELOPMENT || DEBUG
+#include <sys/sysent.h>
+/* This should result in a fatal exception, verifying that "sysent" is
+ * write-protected.
+ */
+static int
+kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
+ uint64_t new_value = 0, old_value = 0;
+ int changed = 0, error;
+
+ error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
+ if ((error == 0) && changed) {
+ volatile uint32_t *wraddr = (uint32_t *) &sysent[0];
+ *wraddr = 0;
+ printf("sysent[0] write succeeded\n");
+ }
+ return error;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, sysent_const_check,
+ CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0,
+ kern_sysent_write, "I", "Attempt sysent[0] write");
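+
+/*
+ * Usage sketch (assumption): on a correctly protected kernel, any write
+ * should fault rather than reach the success printf, e.g.:
+ *
+ *     sysctl -w kern.sysent_const_check=1
+ */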
+
+#endif
+
+#if DEVELOPMENT || DEBUG
+SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 1, "");
+#else
+SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 0, "");
+#endif
+
+
+#if DEVELOPMENT || DEBUG
+
+static int
+sysctl_panic_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int rval = 0;
+ char str[32] = "entry prelog postlog postcore";
+
+ rval = sysctl_handle_string(oidp, str, sizeof(str), req);
+
+ if (rval == 0 && req->newptr) {
+ if (strncmp("entry", str, strlen("entry")) == 0) {
+ panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_ENTRY, "test recursive panic at entry");
+ } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
+ panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_PRELOG, "test recursive panic prior to writing a paniclog");
+ } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
+ panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTLOG, "test recursive panic subsequent to paniclog");
+ } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
+ panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTCORE, "test recursive panic subsequent to on-device core");
+ }
+ }
+
+ return rval;
+}
+
+static int
+sysctl_debugger_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int rval = 0;
+ char str[32] = "entry prelog postlog postcore";
+
+ rval = sysctl_handle_string(oidp, str, sizeof(str), req);
+
+ if (rval == 0 && req->newptr) {
+ if (strncmp("entry", str, strlen("entry")) == 0) {
+ DebuggerWithContext(0, NULL, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY);
+ } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
+ DebuggerWithContext(0, NULL, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG);
+ } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
+ DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG);
+ } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
+ DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE);
+ }
+ }
+
+ return rval;
+}
+
+decl_lck_spin_data(, spinlock_panic_test_lock)
+
+__attribute__((noreturn))
+static void
+spinlock_panic_test_acquire_spinlock(void * arg __unused, wait_result_t wres __unused)
+{
+ lck_spin_lock(&spinlock_panic_test_lock);
+ while (1) { ; }
+}
+
+static int
+sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ if (req->newlen == 0)
+ return EINVAL;
+
+ thread_t panic_spinlock_thread;
+ /* Initialize panic spinlock */
+ lck_grp_t * panic_spinlock_grp;
+ lck_grp_attr_t * panic_spinlock_grp_attr;
+ lck_attr_t * panic_spinlock_attr;
+
+ panic_spinlock_grp_attr = lck_grp_attr_alloc_init();
+ panic_spinlock_grp = lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr);
+ panic_spinlock_attr = lck_attr_alloc_init();
+
+ lck_spin_init(&spinlock_panic_test_lock, panic_spinlock_grp, panic_spinlock_attr);
+
+
+ /* Create thread to acquire spinlock */
+ if (kernel_thread_start(spinlock_panic_test_acquire_spinlock, NULL, &panic_spinlock_thread) != KERN_SUCCESS) {
+ return EBUSY;
+ }
+
+ /* Try to acquire spinlock -- should panic eventually */
+ lck_spin_lock(&spinlock_panic_test_lock);
+ while (1) { ; }
+}
+
+__attribute__((noreturn))
+static void
+simultaneous_panic_worker
+(void * arg, wait_result_t wres __unused)
+{
+ atomic_int *start_panic = (atomic_int *)arg;
+
+ while (!atomic_load(start_panic)) { ; }
+ panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number());
+ __builtin_unreachable();
+}
+
+static int
+sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ if (req->newlen == 0)
+ return EINVAL;
+
+ int i = 0, threads_to_create = 2 * processor_count;
+ atomic_int start_panic = 0;
+ unsigned int threads_created = 0;
+ thread_t new_panic_thread;
+
+ for (i = threads_to_create; i > 0; i--) {
+ if (kernel_thread_start(simultaneous_panic_worker, (void *) &start_panic, &new_panic_thread) == KERN_SUCCESS) {
+ threads_created++;
+ }
+ }
+
+ /* FAIL if we couldn't create at least processor_count threads */
+ if (threads_created < processor_count) {
+ panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %d (of %d)",
+ threads_created, threads_to_create);
+ }
+
+ atomic_exchange(&start_panic, 1);
+ while (1) { ; }
+}
+
+SYSCTL_PROC(_debug, OID_AUTO, panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_panic_test, "A", "panic test");
+SYSCTL_PROC(_debug, OID_AUTO, debugger_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_debugger_test, "A", "debugger test");
+SYSCTL_PROC(_debug, OID_AUTO, spinlock_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_spinlock_panic_test, "A", "spinlock panic test");
+SYSCTL_PROC(_debug, OID_AUTO, simultaneous_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_simultaneous_panic_test, "A", "simultaneous panic test");
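+
+/*
+ * Usage sketch (assumption): each test is driven by writing one of the
+ * keywords from the default string, e.g.:
+ *
+ *     sysctl debug.panic_test=entry
+ *     sysctl debug.debugger_test=postcore
+ */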
+
+
+#endif /* DEVELOPMENT || DEBUG */
+
+const uint32_t thread_groups_supported = 0;
+
+STATIC int
+sysctl_thread_groups_supported (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ int value = thread_groups_supported;
+ return sysctl_io_number(req, value, sizeof(value), NULL, NULL);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, thread_groups_supported, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_KERN,
+ 0, 0, &sysctl_thread_groups_supported, "I", "thread groups supported");
+
+static int
+sysctl_grade_cputype SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+ int error = 0;
+ int type_tuple[2] = {};
+ int return_value = 0;
+
+ error = SYSCTL_IN(req, &type_tuple, sizeof(type_tuple));
+
+ if (error) {
+ return error;
+ }
+
+ return_value = grade_binary(type_tuple[0], type_tuple[1]);
+
+ error = SYSCTL_OUT(req, &return_value, sizeof(return_value));
+
+ if (error) {
+ return error;
+ }
+
+ return error;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, grade_cputype,
+ CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED | CTLTYPE_OPAQUE,
+ 0, 0, &sysctl_grade_cputype, "S",
+ "grade value of cpu_type_t+cpu_sub_type_t");