+ rval = sysctl_handle_string(oidp, arg1, arg2, req);
+
+ if (req->newptr) {
+ IORegistrySetOSBuildVersion((char *)arg1);
+ }
+
+ return rval;
+}
+
/* kern.osversion — read/write OS build-version string; handler defined above. */
SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
    osversion, 256 /* OSVERSIZE*/,
    sysctl_osversion, "A", "");
+
+STATIC int
+sysctl_sysctl_bootargs
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ int error;
+ char buf[256];
+
+ strlcpy(buf, PE_boot_args(), 256);
+ error = sysctl_io_string(req, buf, 256, 0, NULL);
+ return(error);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, bootargs,
+ CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
+ NULL, 0,
+ sysctl_sysctl_bootargs, "A", "bootargs");
+
/* Writable system-wide open-file limit. */
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &maxfiles, 0, "");
/* The following export compile-time constants (arg1 == NULL, value in arg2). */
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, 1, "");
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    (int *)NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    NULL, 0, "");
#endif
/* Counters/maxima maintained by the file, vnode, task and thread subsystems. */
SYSCTL_INT(_kern, OID_AUTO, num_files,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &nfiles, 0, "");
SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &numvnodes, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_tasks,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &task_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_threads,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &thread_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &task_threadmax, 0, "");
+
/*
 * kern.maxvnodes — read/write the desired vnode count; when the value
 * changes, resize the VM object cache and the name cache to match.
 */
STATIC int
sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int oldval = desiredvnodes;
	/* sysctl_io_number writes any new value directly into desiredvnodes. */
	int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);

	/*
	 * NOTE(review): the caches are resized whenever the value changed,
	 * even if sysctl_io_number also reported an error — confirm intended.
	 */
	if (oldval != desiredvnodes) {
		reset_vmobjectcache(oldval, desiredvnodes);
		resize_namecache(desiredvnodes);
	}

	return(error);
}

SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &nc_disabled, 0, "");

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_maxvnodes, "I", "");
+
/* Process and async-I/O limits; handlers are defined elsewhere in the kernel. */
SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_aiothreads, "I", "");
+
+STATIC int
+sysctl_securelvl
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ int new_value, changed;
+ int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
+ if (changed) {
+ if (!(new_value < securelevel && req->p->p_pid != 1)) {
+ proc_list_lock();
+ securelevel = new_value;
+ proc_list_unlock();
+ } else {
+ error = EPERM;
+ }
+ }
+ return(error);
+}
+
+SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0, sysctl_securelvl, "I", "");
+
+
+STATIC int
+sysctl_domainname
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ int error, changed;
+ error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
+ if (changed) {
+ domainnamelen = strlen(domainname);
+ }
+ return(error);
+}
+
+SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
+ CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0, sysctl_domainname, "A", "");
+
/* kern.hostid — legacy writable host identifier (compat-width int). */
SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &hostid, 0, "");
+
/*
 * kern.hostname — read/write the system hostname.
 */
STATIC int
sysctl_hostname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error, changed;
	/* Fourth argument 1 presumably permits truncation of oversize writes —
	 * confirm against sysctl_io_string. */
	error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
	if (changed) {
		/* NOTE(review): caches the raw request length (req->newlen), not
		 * strlen(hostname) — the two can differ; confirm intended. */
		hostnamelen = req->newlen;
	}
	return(error);
}


SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_hostname, "A", "");
+
/*
 * kern.procname — read/write the calling process's p_name buffer in place.
 */
STATIC int
sysctl_procname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	/* Original code allowed writing; that behavior is preserved here even
	 * though allowing writes makes little sense and this sysctl appears
	 * to be unused. */
	return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
}

SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, sysctl_procname, "A", "");
+
/* Cluster-I/O speculative-read controls. */
SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_reads_disabled, 0, "");

SYSCTL_INT(_kern, OID_AUTO, ignore_is_ssd,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &ignore_is_ssd, 0, "");

/* Read-ahead (preheat) window bounds, in pages. */
SYSCTL_UINT(_kern, OID_AUTO, preheat_pages_max,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_pages_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_pages_min,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &preheat_pages_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &speculative_prefetch_max_iosize, 0, "");

/* VM pageout tunables; writable knobs over pageout-daemon thresholds. */
SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_target, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_free_reserved, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_speculative_percentage, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_page_speculative_q_age_ms, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_delayed_work_limit, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vm_max_batch, 0, "");
+
+STATIC int
+sysctl_boottime
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ time_t tv_sec = boottime_sec();
+ struct proc *p = req->p;
+
+ if (proc_is64bit(p)) {
+ struct user64_timeval t;
+ t.tv_sec = tv_sec;
+ t.tv_usec = 0;
+ return sysctl_io_opaque(req, &t, sizeof(t), NULL);
+ } else {
+ struct user32_timeval t;
+ t.tv_sec = tv_sec;
+ t.tv_usec = 0;
+ return sysctl_io_opaque(req, &t, sizeof(t), NULL);
+ }
+}
+
+SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
+ CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ 0, 0, sysctl_boottime, "S,timeval", "");
+
/*
 * kern.symfile — read-only path of the kernel symbol file, as reported
 * by get_kernel_symfile().
 */
STATIC int
sysctl_symfile
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	char *str;
	int error = get_kernel_symfile(req->p, &str);
	if (error)
		return (error);
	/* Size 0: presumably exports the string without a write-back buffer
	 * (read-only) — confirm against sysctl_io_string. */
	return sysctl_io_string(req, str, 0, 0, NULL);
}


SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_symfile, "A", "");
+
#if NFSCLIENT
/* kern.netboot — reports the value of netboot_root(); presumably nonzero
 * when the root filesystem is network-mounted — confirm. */
STATIC int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_netboot, "I", "");
#endif
+
#ifdef CONFIG_IMGSRC_ACCESS
/*
 * Legacy--act as if only one layer of nesting is possible.
 */
/*
 * kern.imgsrcdev — device number of the image-boot source volume.
 * Root only. Takes refs on the root vnode and its mount's device vnode,
 * and drops them in reverse order.
 */
STATIC int
sysctl_imgsrcdev
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	vfs_context_t ctx = vfs_context_current();
	vnode_t devvp;
	int result;

	if (!vfs_context_issuser(ctx)) {
		return EPERM;
	}

	if (imgsrc_rootvnodes[0] == NULL) {
		return ENOENT;
	}

	/* Hold the root vnode across the devvp dereference below. */
	result = vnode_getwithref(imgsrc_rootvnodes[0]);
	if (result != 0) {
		return result;
	}

	devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
	result = vnode_getwithref(devvp);
	if (result != 0) {
		goto out;
	}

	result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);

	vnode_put(devvp);
out:
	vnode_put(imgsrc_rootvnodes[0]);
	return result;
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcdev, "I", "");
+
/*
 * kern.imgsrcinfo — describe each nesting level of the image-boot chain.
 * Emits one imgsrc_info record per populated imgsrc_rootvnodes slot; only
 * the populated prefix of the array is copied out (i * sizeof(info[0])).
 */
STATIC int
sysctl_imgsrcinfo
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct imgsrc_info info[MAX_IMAGEBOOT_NESTING];  /* 2 for now, no problem */
	uint32_t i;
	vnode_t rvp, devvp;

	if (imgsrc_rootvnodes[0] == NULLVP) {
		return ENXIO;
	}

	for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
		/*
		 * Go get the root vnode.
		 */
		rvp = imgsrc_rootvnodes[i];
		if (rvp == NULLVP) {
			break;
		}

		/* NOTE(review): uses vnode_get() here but vnode_getwithref() for
		 * devvp below — confirm the weaker hold is intended for rvp. */
		error = vnode_get(rvp);
		if (error != 0) {
			return error;
		}

		/*
		 * For now, no getting at a non-local volume.
		 */
		devvp = vnode_mount(rvp)->mnt_devvp;
		if (devvp == NULL) {
			vnode_put(rvp);
			return EINVAL;
		}

		error = vnode_getwithref(devvp);
		if (error != 0) {
			vnode_put(rvp);
			return error;
		}

		/*
		 * Fill in info.
		 */
		info[i].ii_dev = vnode_specrdev(devvp);
		info[i].ii_flags = 0;
		info[i].ii_height = i;
		bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));

		vnode_put(devvp);
		vnode_put(rvp);
	}

	return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_imgsrcinfo, "I", "");

#endif /* CONFIG_IMGSRC_ACCESS */
+
/* kern.timer_coalescing_enabled — toggle Mach timer coalescing. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalescing_enabled,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &mach_timer_coalescing_enabled, 0, "");
+
/*
 * kern.usrstack — caller's user stack address, truncated to 32 bits.
 * The (int) cast is intentional: this is the legacy KERN_USRSTACK32 OID;
 * 64-bit callers should use kern.usrstack64 below.
 */
STATIC int
sysctl_usrstack
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack, "I", "");
+
/* kern.usrstack64 — caller's user stack address at full width. */
STATIC int
sysctl_usrstack64
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_usrstack64, "Q", "");
+
/* kern.corefile — writable template path for core dump files. */
SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
    CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
    corefilename, sizeof(corefilename), "");
+
+STATIC int
+sysctl_coredump
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+#ifdef SECURE_KERNEL
+ return (ENOTSUP);
+#endif
+ int new_value, changed;
+ int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
+ if (changed) {
+ if ((new_value == 0) || (new_value == 1))
+ do_coredump = new_value;
+ else
+ error = EINVAL;
+ }
+ return(error);
+}
+
+SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0, sysctl_coredump, "I", "");
+
+STATIC int
+sysctl_suid_coredump
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+#ifdef SECURE_KERNEL
+ return (ENOTSUP);
+#endif
+ int new_value, changed;
+ int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
+ if (changed) {
+ if ((new_value == 0) || (new_value == 1))
+ sugid_coredump = new_value;
+ else
+ error = EINVAL;
+ }
+ return(error);
+}
+
+SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0, sysctl_suid_coredump, "I", "");
+
+STATIC int
+sysctl_delayterm
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ struct proc *p = req->p;
+ int new_value, changed;
+ int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
+ if (changed) {
+ proc_lock(p);
+ if (new_value)
+ req->p->p_lflag |= P_LDELAYTERM;
+ else
+ req->p->p_lflag &= ~P_LDELAYTERM;
+ proc_unlock(p);
+ }
+ return(error);
+}
+
+SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0, sysctl_delayterm, "I", "");
+
+
+STATIC int
+sysctl_rage_vnode
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ struct proc *p = req->p;
+ struct uthread *ut;
+ int new_value, old_value, changed;
+ int error;
+
+ ut = get_bsdthread_info(current_thread());
+
+ if (ut->uu_flag & UT_RAGE_VNODES)
+ old_value = KERN_RAGE_THREAD;
+ else if (p->p_lflag & P_LRAGE_VNODES)
+ old_value = KERN_RAGE_PROC;
+ else
+ old_value = 0;
+
+ error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
+
+ if (error == 0) {
+ switch (new_value) {
+ case KERN_RAGE_PROC:
+ proc_lock(p);
+ p->p_lflag |= P_LRAGE_VNODES;
+ proc_unlock(p);
+ break;
+ case KERN_UNRAGE_PROC:
+ proc_lock(p);
+ p->p_lflag &= ~P_LRAGE_VNODES;
+ proc_unlock(p);
+ break;
+
+ case KERN_RAGE_THREAD:
+ ut->uu_flag |= UT_RAGE_VNODES;
+ break;
+ case KERN_UNRAGE_THREAD:
+ ut = get_bsdthread_info(current_thread());
+ ut->uu_flag &= ~UT_RAGE_VNODES;
+ break;
+ }
+ }
+ return(error);
+}
+
+SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
+ 0, 0, sysctl_rage_vnode, "I", "");
+
+/* XXX move this interface into libproc and remove this sysctl */
+STATIC int
+sysctl_setthread_cpupercent
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ int new_value, old_value;
+ int error = 0;
+ kern_return_t kret = KERN_SUCCESS;
+ uint8_t percent = 0;
+ int ms_refill = 0;
+
+ old_value = 0;
+
+ if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0)
+ return (error);
+
+ percent = new_value & 0xff; /* low 8 bytes for perent */
+ ms_refill = (new_value >> 8) & 0xffffff; /* upper 24bytes represent ms refill value */
+ if (percent > 100)
+ return (EINVAL);
+
+ /*
+ * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
+ */
+ if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * NSEC_PER_MSEC)) != 0)
+ return (EIO);
+
+ return (0);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY,
+ 0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
+
+
+STATIC int
+sysctl_kern_check_openevt
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ struct proc *p = req->p;
+ int new_value, old_value, changed;
+ int error;
+
+ if (p->p_flag & P_CHECKOPENEVT) {
+ old_value = KERN_OPENEVT_PROC;
+ } else {
+ old_value = 0;
+ }
+
+ error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
+
+ if (error == 0) {
+ switch (new_value) {
+ case KERN_OPENEVT_PROC:
+ OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
+ break;
+
+ case KERN_UNOPENEVT_PROC:
+ OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
+ break;
+
+ default:
+ error = EINVAL;
+ }
+ }
+ return(error);
+}
+
+SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
+ 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
+
+
+
+STATIC int
+sysctl_nx
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+#ifdef SECURE_KERNEL
+ return ENOTSUP;
+#endif
+ int new_value, changed;
+ int error;
+
+ error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
+ if (error)
+ return error;
+
+ if (changed) {
+#if defined(__i386__) || defined(__x86_64__)
+ /*
+ * Only allow setting if NX is supported on the chip
+ */
+ if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
+ return ENOTSUP;
+#endif
+ nx_enabled = new_value;
+ }
+ return(error);
+}
+
+
+
+SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+ 0, 0, sysctl_nx, "I", "");
+
+STATIC int
+sysctl_loadavg
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ if (proc_is64bit(req->p)) {
+ struct user64_loadavg loadinfo64;
+ fill_loadavg64(&averunnable, &loadinfo64);
+ return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
+ } else {
+ struct user32_loadavg loadinfo32;
+ fill_loadavg32(&averunnable, &loadinfo32);
+ return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
+ }
+}
+
+SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
+ CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ 0, 0, sysctl_loadavg, "S,loadavg", "");
+
+/*
+ * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse()
+ */
+STATIC int
+sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
+ __unused int arg2, struct sysctl_req *req)
+{
+ int old_value=0, new_value=0, error=0;
+
+ if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value ))
+ return(error);
+ error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
+ if (!error) {
+ return (vm_toggle_entry_reuse(new_value, NULL));
+ }
+ return(error);
+}
+
+SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse,"I","");
+
+STATIC int
+sysctl_swapusage
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ int error;
+ uint64_t swap_total;
+ uint64_t swap_avail;
+ vm_size_t swap_pagesize;
+ boolean_t swap_encrypted;
+ struct xsw_usage xsu;
+
+ error = macx_swapinfo(&swap_total,
+ &swap_avail,
+ &swap_pagesize,
+ &swap_encrypted);
+ if (error)
+ return error;
+
+ xsu.xsu_total = swap_total;
+ xsu.xsu_avail = swap_avail;
+ xsu.xsu_used = swap_total - swap_avail;
+ xsu.xsu_pagesize = swap_pagesize;
+ xsu.xsu_encrypted = swap_encrypted;
+ return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
+}
+
+
+
+SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
+ CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ 0, 0, sysctl_swapusage, "S,xsw_usage", "");
+
#if CONFIG_FREEZE
extern void vm_page_reactivate_all_throttled(void);

/*
 * vm.freeze_enabled — toggle the memorystatus freezer. When freezing is
 * being turned off, dirty pages are moved from the throttled queue back
 * to the active queue.
 */
static int
sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = memorystatus_freeze_enabled ? 1 : 0;
	boolean_t disabled;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
	 */
	disabled = (!val && memorystatus_freeze_enabled);

	memorystatus_freeze_enabled = val ? TRUE : FALSE;

	if (disabled) {
		vm_page_reactivate_all_throttled();
	}

	return (0);
}

SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
#endif /* CONFIG_FREEZE */
+
/* this kernel does NOT implement shared_region_make_private_np() */
/* Constant 0: the feature is reported as unavailable. */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    (int *)NULL, 0, "");
+
#if defined(__i386__) || defined(__x86_64__)
/*
 * sysctl.proc_exec_affinity — read/write the calling process's P_AFFINITY
 * flag, expressed as a cpu_type_t (CPU_TYPE_POWERPC when set,
 * CPU_TYPE_I386 when clear). A read happens only if oldptr is supplied;
 * a write only if newptr is supplied.
 */
STATIC int
sysctl_sysctl_exec_affinity(__unused struct sysctl_oid *oidp,
			    __unused void *arg1, __unused int arg2,
			    struct sysctl_req *req)
{
	proc_t cur_proc = req->p;
	int error;

	if (req->oldptr != USER_ADDR_NULL) {
		cpu_type_t oldcputype = (cur_proc->p_flag & P_AFFINITY) ? CPU_TYPE_POWERPC : CPU_TYPE_I386;
		if ((error = SYSCTL_OUT(req, &oldcputype, sizeof(oldcputype))))
			return error;
	}

	if (req->newptr != USER_ADDR_NULL) {
		cpu_type_t newcputype;
		if ((error = SYSCTL_IN(req, &newcputype, sizeof(newcputype))))
			return error;
		if (newcputype == CPU_TYPE_I386)
			OSBitAndAtomic(~((uint32_t)P_AFFINITY), &cur_proc->p_flag);
		else if (newcputype == CPU_TYPE_POWERPC)
			OSBitOrAtomic(P_AFFINITY, &cur_proc->p_flag);
		else
			return (EINVAL);
	}

	/* Neither pointer supplied: nothing to do, report success. */
	return 0;
}
SYSCTL_PROC(_sysctl, OID_AUTO, proc_exec_affinity, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_exec_affinity ,"I","proc_exec_affinity");
#endif
+
/*
 * Resolve the effective cpu_type_t of a process.
 *
 * With namelen == 0 the current process is used; with namelen == 1,
 * name[0] is a pid to look up (a proc ref is taken and released here).
 * Any other namelen is EINVAL. On x86, a P_TRANSLATED (Rosetta) process
 * reports CPU_TYPE_POWERPC; otherwise the machine's cpu_type() is used,
 * with CPU_ARCH_ABI64 OR'd in for 64-bit processes.
 */
STATIC int
fetch_process_cputype(
	proc_t cur_proc,
	int *name,
	u_int namelen,
	cpu_type_t *cputype)
{
	proc_t p = PROC_NULL;
	int refheld = 0;
	cpu_type_t ret = 0;
	int error = 0;

	if (namelen == 0)
		p = cur_proc;
	else if (namelen == 1) {
		p = proc_find(name[0]);
		if (p == NULL)
			return (EINVAL);
		refheld = 1;	/* remember to drop the ref on the way out */
	} else {
		error = EINVAL;
		goto out;
	}

#if defined(__i386__) || defined(__x86_64__)
	if (p->p_flag & P_TRANSLATED) {
		ret = CPU_TYPE_POWERPC;
	}
	else
#endif
	{
		ret = cpu_type();
		if (IS_64BIT_PROCESS(p))
			ret |= CPU_ARCH_ABI64;
	}
	*cputype = ret;

	if (refheld != 0)
		proc_rele(p);
out:
	return (error);
}
+
+STATIC int
+sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
+ struct sysctl_req *req)
+{
+ int error;
+ cpu_type_t proc_cputype = 0;
+ if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
+ return error;
+ int res = 1;
+ if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
+ res = 0;
+ return SYSCTL_OUT(req, &res, sizeof(res));
+}
+SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native ,"I","proc_native");
+
+STATIC int
+sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
+ struct sysctl_req *req)
+{
+ int error;
+ cpu_type_t proc_cputype = 0;
+ if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
+ return error;
+ return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
+}
+SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
+
+STATIC int
+sysctl_safeboot
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
+}
+
+SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ 0, 0, sysctl_safeboot, "I", "");
+
+STATIC int
+sysctl_singleuser
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, singleuser,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ 0, 0, sysctl_singleuser, "I", "");
+
/*
 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
 */
extern boolean_t	affinity_sets_enabled;
extern int		affinity_sets_mapping;

/* NOTE(review): the (int *) cast assumes boolean_t is int-sized — confirm. */
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
	    CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
	    CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
+
+/*
+ * Boolean indicating if KASLR is active.
+ */
+STATIC int
+sysctl_slide
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ uint32_t slide;
+
+ slide = vm_kernel_slide ? 1 : 0;
+
+ return sysctl_io_number( req, slide, sizeof(int), NULL, NULL);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, slide,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ 0, 0, sysctl_slide, "I", "");
+
/*
 * Limit on total memory users can wire.
 *
 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
 *
 * vm_user_wire_limit - per address space limit on wired memory.  This puts a cap on the process's rlimit value.
 *
 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
 * kmem_init().
 *
 * All values are in bytes.
 */

vm_map_size_t	vm_global_no_user_wire_amount;
vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

/*
 * There needs to be a more automatic/elegant way to do this
 */
SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");

/* Read-only counters from the vm_map copy-overwrite fast path. */
extern int vm_map_copy_overwrite_aligned_src_not_internal;
extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
extern int vm_map_copy_overwrite_aligned_src_large;
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");
+
+
/*
 * enable back trace events for thread blocks
 */

extern	uint32_t        kdebug_thread_block;

SYSCTL_INT (_kern, OID_AUTO, kdebug_thread_block,
	    CTLFLAG_RW | CTLFLAG_LOCKED, &kdebug_thread_block, 0, "kdebug thread_block");

/*
 * Kernel stack size and depth
 */
/* NOTE(review): (int *) casts assume the underlying variables are int-sized — confirm. */
SYSCTL_INT (_kern, OID_AUTO, stack_size,
	    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
	    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");

/*
 * enable back trace for port allocations
 */
extern int ipc_portbt;

SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
	   CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
	   &ipc_portbt, 0, "");
+
/*
 * Scheduler sysctls
 */

/*
 * See osfmk/kern/sched_prim.c for the corresponding definition
 * in osfmk/. If either version changes, update the other.
 */
#define SCHED_STRING_MAX_LENGTH (48)

extern char sched_string[SCHED_STRING_MAX_LENGTH];
SYSCTL_STRING(_kern, OID_AUTO, sched,
	      CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
	      sched_string, sizeof(sched_string),
	      "Timeshare scheduler implementation");

/*
 * Only support runtime modification on embedded platforms
 * with development config enabled
 */
#if CONFIG_EMBEDDED
#if !SECURE_KERNEL
extern int precise_user_kernel_time;
SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time,
	   CTLFLAG_RW | CTLFLAG_LOCKED,
	   &precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time");
#endif
#endif