+ if (arg_size > argslen) {
+ data = (caddr_t) (copy_end - argslen);
+ size = argslen;
+ } else {
+ data = (caddr_t) (copy_end - arg_size);
+ size = arg_size;
+ }
+
+ if (argc_yes) {
+ /* Put processes argc as the first word in the copyout buffer */
+ suword(where, p->p_argc);
+ error = copyout(data, (where + sizeof(int)), size);
+ size += sizeof(int);
+ } else {
+ error = copyout(data, where, size);
+
+		/*
+		 * Make the old PROCARGS work to return the executable's path
+		 * But, only if there is enough space in the provided buffer
+		 *
+		 * on entry: data [possibly] points to the beginning of the path
+		 *
+		 * Note: we keep all pointers & sizes aligned to word boundaries
+		 */
+ if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
+ {
+ int binPath_sz, alignedBinPath_sz = 0;
+ int extraSpaceNeeded, addThis;
+ user_addr_t placeHere;
+ char * str = (char *) data;
+ int max_len = size;
+
+ /* Some apps are really bad about messing up their stacks
+ So, we have to be extra careful about getting the length
+ of the executing binary. If we encounter an error, we bail.
+ */
+
+ /* Limit ourselves to PATH_MAX paths */
+ if ( max_len > PATH_MAX ) max_len = PATH_MAX;
+
+ binPath_sz = 0;
+
+ while ( (binPath_sz < max_len-1) && (*str++ != 0) )
+ binPath_sz++;
+
+ /* If we have a NUL terminator, copy it, too */
+ if (binPath_sz < max_len-1) binPath_sz += 1;
+
+			/* Pre-flight the space requirements */
+
+ /* Account for the padding that fills out binPath to the next word */
+ alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;
+
+ placeHere = where + size;
+
+ /* Account for the bytes needed to keep placeHere word aligned */
+ addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;
+
+ /* Add up all the space that is needed */
+ extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
+
+ /* is there is room to tack on argv[0]? */
+ if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
+ {
+ placeHere += addThis;
+ suword(placeHere, 0);
+ placeHere += sizeof(int);
+ suword(placeHere, 0xBFFF0000);
+ placeHere += sizeof(int);
+ suword(placeHere, 0);
+ placeHere += sizeof(int);
+ error = copyout(data, placeHere, binPath_sz);
+ if ( ! error )
+ {
+ placeHere += binPath_sz;
+ suword(placeHere, 0);
+ size += extraSpaceNeeded;
+ }
+ }
+ }
+ }
+
+ if (copy_start != (vm_offset_t) 0) {
+ kmem_free(kernel_map, copy_start, copy_end - copy_start);
+ }
+ if (error) {
+ return(error);
+ }
+
+ if (where != USER_ADDR_NULL)
+ *sizep = size;
+ return (0);
+}
+
+
+/*
+ * Tunable: system-wide cap on the number of concurrent aio requests.
+ */
+STATIC int
+sysctl_aiomax
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	int requested, did_change;
+	int error;
+
+	error = sysctl_io_number(req, aio_max_requests, sizeof(int), &requested, &did_change);
+	if (did_change) {
+		/* the system-wide limit must stay >= the per-process limit and within the hard cap */
+		if (requested < aio_max_requests_per_process || requested > AIO_MAX_REQUESTS)
+			error = EINVAL;
+		else
+			aio_max_requests = requested;
+	}
+	return error;
+}
+
+
+/*
+ * Tunable: per-process cap on the number of concurrent aio requests.
+ */
+STATIC int
+sysctl_aioprocmax
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	int requested, did_change;
+	int error;
+
+	error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &requested, &did_change);
+	if (did_change) {
+		/* the per-process limit must stay within [AIO_LISTIO_MAX, system-wide limit] */
+		if (requested > aio_max_requests || requested < AIO_LISTIO_MAX)
+			error = EINVAL;
+		else
+			aio_max_requests_per_process = requested;
+	}
+	return error;
+}
+
+
+/*
+ * Max number of async IO worker threads
+ *
+ * Write handler: the thread count may only grow, because there is no
+ * mechanism here to retire existing workers.  New workers are spawned
+ * for the delta before the published count is updated.
+ */
+STATIC int
+sysctl_aiothreads
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	int new_value, changed;
+	int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
+	if (changed) {
+		/* we only allow an increase in the number of worker threads */
+		if (new_value > aio_worker_threads ) {
+			/* spawn only the additional workers, then publish the new total */
+			_aio_create_worker_threads((new_value - aio_worker_threads));
+			aio_worker_threads = new_value;
+		}
+		else
+			error = EINVAL;
+	}
+	return(error);
+}
+
+
+/*
+ * System-wide limit on the max number of processes
+ */
+STATIC int
+sysctl_maxproc
+(__unused struct sysctl_oid *oidp, __unused int *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	int requested, did_change;
+	int error;
+
+	error = sysctl_io_number(req, maxproc, sizeof(int), &requested, &did_change);
+	if (did_change) {
+		AUDIT_ARG(value32, requested);
+		/* the new limit must be positive and no larger than the
+		   compile-time hard limit */
+		if (requested > 0 && requested <= hard_maxproc)
+			maxproc = requested;
+		else
+			error = EINVAL;
+	}
+	return error;
+}
+
+/*
+ * Read-only identification values exported under kern.*:
+ * OS type/release/revision, build version string and kernel UUID.
+ */
+SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
+	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	ostype, 0, "");
+SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
+	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	osrelease, 0, "");
+SYSCTL_INT(_kern, KERN_OSREV, osrevision,
+	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	(int *)NULL, BSD, "");
+SYSCTL_STRING(_kern, KERN_VERSION, version,
+	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	version, 0, "");
+SYSCTL_STRING(_kern, OID_AUTO, uuid,
+	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&kernel_uuid_string[0], 0, "");
+
+#if DEBUG
+/* Non-zero enables kprintf() tracing of syscalls (debug kernels only) */
+int debug_kprint_syscall = 0;
+/* Optional process-name filter for the tracing above; empty = trace all */
+char debug_kprint_syscall_process[MAXCOMLEN+1];
+
+/* Thread safe: bits and string value are not used to reclaim state */
+SYSCTL_INT (_debug, OID_AUTO, kprint_syscall,
+	   CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
+SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
+	      CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
+	      "name of process for kprintf syscall tracing");
+
+/*
+ * Decide whether syscall kprintf tracing applies to the current process.
+ * Returns 1 to trace, 0 to skip.  On return, *namep (if supplied) is the
+ * process name to include in the trace output, or NULL when tracing was
+ * scoped to a single named process (no value in repeating that name).
+ */
+int debug_kprint_current_process(const char **namep)
+{
+	struct proc *p = current_proc();
+
+	if (p == NULL) {
+		return 0;
+	}
+
+	if (debug_kprint_syscall_process[0]) {
+		/* user asked to scope tracing to a particular process name */
+		if(0 == strncmp(debug_kprint_syscall_process,
+				p->p_comm, sizeof(debug_kprint_syscall_process))) {
+			/* no value in telling the user that we traced what they asked */
+			if(namep) *namep = NULL;
+
+			return 1;
+		} else {
+			return 0;
+		}
+	}
+
+	/* trace all processes. Tell user what we traced */
+	if (namep) {
+		*namep = p->p_comm;
+	}
+
+	return 1;
+}
+#endif
+
+/* PR-5293665: need to use a callback function for kern.osversion to set
+ * osversion in IORegistry */
+
+/*
+ * kern.osversion handler: delegates read/write of the backing string to
+ * sysctl_handle_string(), then mirrors any newly written value into the
+ * IORegistry so both views stay consistent.
+ */
+STATIC int
+sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
+{
+	int rval = 0;
+
+	rval = sysctl_handle_string(oidp, arg1, arg2, req);
+
+	/* req->newptr is non-NULL only for write requests */
+	if (req->newptr) {
+		IORegistrySetOSBuildVersion((char *)arg1);
+	}
+
+	return rval;
+}
+
+SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
+	osversion, 256 /* OSVERSIZE*/,
+	sysctl_osversion, "A", "");
+
+/*
+ * kern.bootargs (read-only): returns a snapshot of the kernel boot
+ * arguments string.  The platform string is copied into a local buffer
+ * first so the caller can never see a partially updated source.
+ */
+STATIC int
+sysctl_sysctl_bootargs
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	int error;
+	char buf[256];
+
+	/* use sizeof(buf) rather than repeating the magic length; strlcpy
+	 * guarantees NUL termination even if the boot args are longer */
+	strlcpy(buf, PE_boot_args(), sizeof(buf));
+	error = sysctl_io_string(req, buf, sizeof(buf), 0, NULL);
+	return(error);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, bootargs,
+	CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
+	NULL, 0,
+	sysctl_sysctl_bootargs, "A", "bootargs");
+
+/*
+ * POSIX/BSD limits and live object counters exported under kern.*.
+ * The (int *)NULL entries publish a compile-time constant (the arg2
+ * value) rather than a kernel variable.
+ */
+SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&maxfiles, 0, "");
+SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
+	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	(int *)NULL, ARG_MAX, "");
+SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
+	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	(int *)NULL, _POSIX_VERSION, "");
+SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
+	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	(int *)NULL, NGROUPS_MAX, "");
+SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
+	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	(int *)NULL, 1, "");
+#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
+SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
+	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	(int *)NULL, 1, "");
+#else
+SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
+	CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	NULL, 0, "");
+#endif
+/* live counters of open files, vnodes, tasks and threads */
+SYSCTL_INT(_kern, OID_AUTO, num_files,
+	CTLFLAG_RD | CTLFLAG_LOCKED,
+	&nfiles, 0, "");
+SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
+	CTLFLAG_RD | CTLFLAG_LOCKED,
+	&numvnodes, 0, "");
+SYSCTL_INT(_kern, OID_AUTO, num_tasks,
+	CTLFLAG_RD | CTLFLAG_LOCKED,
+	&task_max, 0, "");
+SYSCTL_INT(_kern, OID_AUTO, num_threads,
+	CTLFLAG_RD | CTLFLAG_LOCKED,
+	&thread_max, 0, "");
+SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
+	CTLFLAG_RD | CTLFLAG_LOCKED,
+	&task_threadmax, 0, "");
+
+/*
+ * kern.maxvnodes handler: reads or updates desiredvnodes.  On a change,
+ * the VM object cache and name cache are resized to match.
+ *
+ * Read the proposed value into a local first instead of letting
+ * sysctl_io_number() store straight into the live global: that way a
+ * failed or invalid write can never leave desiredvnodes (or the caches
+ * sized from it) reflecting a nonsensical value such as 0 or a negative.
+ */
+STATIC int
+sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	int oldval = desiredvnodes;
+	int new_value = desiredvnodes;
+	int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &new_value, NULL);
+
+	if (error == 0 && new_value != oldval) {
+		if (new_value <= 0)
+			return (EINVAL);
+		desiredvnodes = new_value;
+		reset_vmobjectcache(oldval, desiredvnodes);
+		resize_namecache(desiredvnodes);
+	}
+
+	return(error);
+}
+
+/* debugging switch: non-zero disables the vnode name cache */
+SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
+	CTLFLAG_RW | CTLFLAG_LOCKED,
+	&nc_disabled, 0, "");
+
+/* tunable limits served by the callback handlers defined above */
+SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
+	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+	0, 0, sysctl_maxvnodes, "I", "");
+
+SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
+	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+	0, 0, sysctl_maxproc, "I", "");
+
+SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
+	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+	0, 0, sysctl_aiomax, "I", "");
+
+SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
+	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+	0, 0, sysctl_aioprocmax, "I", "");
+
+SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
+	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+	0, 0, sysctl_aiothreads, "I", "");
+
+#if (DEVELOPMENT || DEBUG)
+/* scheduler SMT load-balancing switch; development/debug kernels only */
+extern int sched_smt_balance;
+SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance,
+	CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
+	&sched_smt_balance, 0, "");
+#endif
+
+/*
+ * kern.securelevel handler.  Raising the level is always allowed;
+ * lowering it is permitted only to pid 1 (launchd/init), which is what
+ * the inverted condition below expresses.
+ */
+STATIC int
+sysctl_securelvl
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	int new_value, changed;
+	int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
+	if (changed) {
+		/* i.e. allow unless this is a decrease requested by a non-pid-1 process */
+		if (!(new_value < securelevel && req->p->p_pid != 1)) {
+			/* securelevel is protected by the proc list lock */
+			proc_list_lock();
+			securelevel = new_value;
+			proc_list_unlock();
+		} else {
+			error = EPERM;
+		}
+	}
+	return(error);
+}
+
+SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
+	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+	0, 0, sysctl_securelvl, "I", "");
+
+
+/*
+ * kern.nisdomainname handler: read/write the NIS domain name and keep
+ * the cached length in sync after a successful write.
+ */
+STATIC int
+sysctl_domainname
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	int did_change;
+	int error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &did_change);
+
+	if (did_change != 0)
+		domainnamelen = strlen(domainname);
+	return error;
+}
+
+SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
+	CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
+	0, 0, sysctl_domainname, "A", "");
+
+/* legacy host id; COMPAT variant keeps the historical int width */
+SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&hostid, 0, "");
+
+/*
+ * kern.hostname handler: read/write the hostname.  The '1' argument to
+ * sysctl_io_string trims the trailing NUL on reads.
+ */
+STATIC int
+sysctl_hostname
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	int error, changed;
+	error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
+	if (changed) {
+		/* NOTE(review): length taken from the raw request size, which
+		 * includes the NUL, rather than strlen(hostname) — presumably
+		 * intentional/historical; confirm before relying on it */
+		hostnamelen = req->newlen;
+	}
+	return(error);
+}
+
+
+SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
+	CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
+	0, 0, sysctl_hostname, "A", "");
+
+/*
+ * kern.procname: reads (or, historically, writes) the calling process's
+ * p_name buffer.  CTLFLAG_ANYBODY below means no privilege is required.
+ */
+STATIC int
+sysctl_procname
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	/* Original code allowed writing, I'm copying this, although this all makes
+	   no sense to me. Besides, this sysctl is never used. */
+	return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
+}
+
+SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
+	CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
+	0, 0, sysctl_procname, "A", "");
+
+/*
+ * VM pager / speculative-read / page-queue tuning knobs.  Each entry
+ * exposes a kernel global directly; no callback validation is applied.
+ */
+SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&speculative_reads_disabled, 0, "");
+
+SYSCTL_INT(_kern, OID_AUTO, ignore_is_ssd,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&ignore_is_ssd, 0, "");
+
+SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&preheat_max_bytes, 0, "");
+
+SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&preheat_min_bytes, 0, "");
+
+SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&speculative_prefetch_max, 0, "");
+
+SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&speculative_prefetch_max_iosize, 0, "");
+
+SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&vm_page_free_target, 0, "");
+
+SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&vm_page_free_min, 0, "");
+
+SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&vm_page_free_reserved, 0, "");
+
+SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&vm_page_speculative_percentage, 0, "");
+
+SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&vm_page_speculative_q_age_ms, 0, "");
+
+SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&vm_max_delayed_work_limit, 0, "");
+
+SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	&vm_max_batch, 0, "");
+
+/* UUID string regenerated for every boot session */
+SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
+	CTLFLAG_RD | CTLFLAG_LOCKED,
+	&bootsessionuuid_string, sizeof(bootsessionuuid_string) , "");
+
+/*
+ * kern.boottime handler: report boot time as a struct timeval laid out
+ * for the requesting process's ABI (32- vs 64-bit).
+ */
+STATIC int
+sysctl_boottime
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	time_t secs = boottime_sec();
+
+	if (proc_is64bit(req->p)) {
+		struct user64_timeval tv;
+		tv.tv_sec = secs;
+		tv.tv_usec = 0;
+		return sysctl_io_opaque(req, &tv, sizeof(tv), NULL);
+	}
+
+	struct user32_timeval tv;
+	tv.tv_sec = secs;
+	tv.tv_usec = 0;
+	return sysctl_io_opaque(req, &tv, sizeof(tv), NULL);
+}
+
+/* opaque struct timeval, hence CTLTYPE_STRUCT and the "S,timeval" format */
+SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
+	CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
+	0, 0, sysctl_boottime, "S,timeval", "");
+
+/*
+ * kern.symfile handler: returns the path of the kernel symbol file.
+ * A zero maxlen is passed to sysctl_io_string since the string is
+ * provided (and sized) by get_kernel_symfile().
+ */
+STATIC int
+sysctl_symfile
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	char *str;
+	int error = get_kernel_symfile(req->p, &str);
+	if (error)
+		return (error);
+	return sysctl_io_string(req, str, 0, 0, NULL);
+}
+
+
+SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
+	CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
+	0, 0, sysctl_symfile, "A", "");
+
+#if NFSCLIENT
+/*
+ * kern.netboot (read-only): non-zero when the root filesystem was
+ * mounted over the network; value comes straight from netboot_root().
+ */
+STATIC int
+sysctl_netboot
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
+}
+
+SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
+	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+	0, 0, sysctl_netboot, "I", "");
+#endif
+
+#ifdef CONFIG_IMGSRC_ACCESS
+/*
+ * Legacy--act as if only one layer of nesting is possible.
+ */
+/*
+ * kern.imgsrcdev (root only): returns the dev_t backing the first
+ * image-boot root vnode.  Both the root vnode and its mount's device
+ * vnode are iocount-referenced around the copyout, then released in
+ * reverse order via the goto-cleanup path.
+ */
+STATIC int
+sysctl_imgsrcdev
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	vfs_context_t ctx = vfs_context_current();
+	vnode_t devvp;
+	int result;
+
+	/* privileged interface */
+	if (!vfs_context_issuser(ctx)) {
+		return EPERM;
+	}
+
+	if (imgsrc_rootvnodes[0] == NULL) {
+		return ENOENT;
+	}
+
+	/* take an iocount on the root vnode before dereferencing its mount */
+	result = vnode_getwithref(imgsrc_rootvnodes[0]);
+	if (result != 0) {
+		return result;
+	}
+
+	devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
+	result = vnode_getwithref(devvp);
+	if (result != 0) {
+		goto out;
+	}
+
+	result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);
+
+	vnode_put(devvp);
+out:
+	vnode_put(imgsrc_rootvnodes[0]);
+	return result;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
+	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+	0, 0, sysctl_imgsrcdev, "I", "");
+
+/*
+ * kern.imgsrcinfo: returns one imgsrc_info record per image-boot
+ * nesting level (up to MAX_IMAGEBOOT_NESTING; 2 today).  Only the
+ * levels actually populated are copied out.
+ */
+STATIC int
+sysctl_imgsrcinfo
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	int error;
+	struct imgsrc_info info[MAX_IMAGEBOOT_NESTING];	/* 2 for now, no problem */
+	uint32_t i;
+	vnode_t rvp, devvp;
+
+	if (imgsrc_rootvnodes[0] == NULLVP) {
+		return ENXIO;
+	}
+
+	for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
+		/*
+		 * Go get the root vnode.
+		 */
+		rvp = imgsrc_rootvnodes[i];
+		if (rvp == NULLVP) {
+			break;
+		}
+
+		error = vnode_get(rvp);
+		if (error != 0) {
+			return error;
+		}
+
+		/*
+		 * For now, no getting at a non-local volume.
+		 */
+		devvp = vnode_mount(rvp)->mnt_devvp;
+		if (devvp == NULL) {
+			vnode_put(rvp);
+			return EINVAL;
+		}
+
+		error = vnode_getwithref(devvp);
+		if (error != 0) {
+			vnode_put(rvp);
+			return error;
+		}
+
+		/*
+		 * Fill in info.
+		 */
+		info[i].ii_dev = vnode_specrdev(devvp);
+		info[i].ii_flags = 0;
+		info[i].ii_height = i;
+		bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));
+
+		/* release in the reverse order the references were taken */
+		vnode_put(devvp);
+		vnode_put(rvp);
+	}
+
+	/* i is the number of levels successfully filled in above */
+	return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
+	CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
+	0, 0, sysctl_imgsrcinfo, "I", "");
+
+#endif /* CONFIG_IMGSRC_ACCESS */
+
+
+/* kern.timer.* subtree: timer-call coalescing and tracking statistics */
+SYSCTL_DECL(_kern_timer);
+SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");
+
+
+SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
+	CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+	&mach_timer_coalescing_enabled, 0, "");
+
+SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
+	CTLFLAG_RW | CTLFLAG_LOCKED,
+	&timer_deadline_tracking_bin_1, "");
+SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
+	CTLFLAG_RW | CTLFLAG_LOCKED,
+	&timer_deadline_tracking_bin_2, "");
+
+/* kern.timer.longterm.* subtree: long-term timer queue statistics */
+SYSCTL_DECL(_kern_timer_longterm);
+SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");
+
+
+/* Must match definition in osfmk/kern/timer_call.c */
+enum {
+	THRESHOLD, QCOUNT,
+	ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
+	LATENCY, LATENCY_MIN, LATENCY_MAX
+};
+/* accessors implemented in osfmk/kern/timer_call.c */
+extern uint64_t timer_sysctl_get(int);
+extern int	timer_sysctl_set(int, uint64_t);
+
+/*
+ * Shared handler for all kern.timer.longterm.* entries.  arg1 carries a
+ * small enum selector smuggled through the pointer argument; convert it
+ * via uintptr_t so the narrowing pointer-to-int cast is well-defined on
+ * LP64 (a direct (int)arg1 cast truncates a pointer and draws compiler
+ * warnings).
+ */
+STATIC int
+sysctl_timer
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	int oid = (int)(uintptr_t)arg1;
+	uint64_t value = timer_sysctl_get(oid);
+	uint64_t new_value;
+	int error;
+	int changed;
+
+	error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
+	if (changed)
+		error = timer_sysctl_set(oid, new_value);
+
+	return error;
+}
+
+/* threshold is the only writable entry; the rest are read-only counters */
+SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
+	CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+	(void *) THRESHOLD, 0, sysctl_timer, "Q", "");
+SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
+	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+	(void *) QCOUNT, 0, sysctl_timer, "Q", "");
+#if DEBUG
+/* detailed queue statistics, exposed on debug kernels only */
+SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
+	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+	(void *) ENQUEUES, 0, sysctl_timer, "Q", "");
+SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
+	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+	(void *) DEQUEUES, 0, sysctl_timer, "Q", "");
+SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
+	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+	(void *) ESCALATES, 0, sysctl_timer, "Q", "");
+SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
+	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+	(void *) SCANS, 0, sysctl_timer, "Q", "");
+SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
+	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+	(void *) PREEMPTS, 0, sysctl_timer, "Q", "");
+SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
+	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+	(void *) LATENCY, 0, sysctl_timer, "Q", "");
+SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
+	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+	(void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
+SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
+	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+	(void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
+#endif /* DEBUG */
+
+/*
+ * kern.usrstack (legacy 32-bit): user stack address of the requesting
+ * process, deliberately narrowed to int for the historical interface.
+ * 64-bit callers should use kern.usrstack64 below.
+ */
+STATIC int
+sysctl_usrstack
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
+}
+
+SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
+	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+	0, 0, sysctl_usrstack, "I", "");
+
+/*
+ * kern.usrstack64: full-width user stack address of the requesting
+ * process (read-only).
+ */
+STATIC int
+sysctl_usrstack64
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	struct proc *p = req->p;
+
+	return sysctl_io_number(req, p->user_stack, sizeof(p->user_stack), NULL, NULL);
+}
+
+SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
+	CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+	0, 0, sysctl_usrstack64, "Q", "");
+
+/* template path used when writing core dumps */
+SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
+	CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+	corefilename, sizeof(corefilename), "");
+
+/*
+ * kern.coredump handler: boolean switch enabling core dumps.  On
+ * SECURE_KERNEL builds the knob is compiled out and always ENOTSUP.
+ */
+STATIC int
+sysctl_coredump
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+#ifdef SECURE_KERNEL
+	(void)req;
+	return (ENOTSUP);
+#else
+	int new_value, changed;
+	int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
+	if (changed) {
+		/* strictly boolean: only 0 or 1 accepted */
+		if ((new_value == 0) || (new_value == 1))
+			do_coredump = new_value;
+		else
+			error = EINVAL;
+	}
+	return(error);
+#endif
+}
+
+SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
+	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+	0, 0, sysctl_coredump, "I", "");
+
+/*
+ * kern.sugid_coredump handler: boolean switch allowing core dumps from
+ * set-uid/set-gid processes.  Compiled out (ENOTSUP) on SECURE_KERNEL.
+ */
+STATIC int
+sysctl_suid_coredump
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+#ifdef SECURE_KERNEL
+	(void)req;
+	return (ENOTSUP);
+#else
+	int new_value, changed;
+	int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
+	if (changed) {
+		/* strictly boolean: only 0 or 1 accepted */
+		if ((new_value == 0) || (new_value == 1))
+			sugid_coredump = new_value;
+		else
+			error = EINVAL;
+	}
+	return(error);
+#endif
+}
+
+SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
+	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+	0, 0, sysctl_suid_coredump, "I", "");
+
+/*
+ * kern.delayterm handler: reads or toggles the calling process's
+ * P_LDELAYTERM flag (delayed termination), under the proc lock.
+ */
+STATIC int
+sysctl_delayterm
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	struct proc *p = req->p;
+	int requested, did_change;
+	int error;
+
+	error = sysctl_io_number(req, (p->p_lflag & P_LDELAYTERM) ? 1 : 0, sizeof(int), &requested, &did_change);
+	if (did_change) {
+		proc_lock(p);
+		if (requested != 0)
+			p->p_lflag |= P_LDELAYTERM;
+		else
+			p->p_lflag &= ~P_LDELAYTERM;
+		proc_unlock(p);
+	}
+	return error;
+}
+
+SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
+	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+	0, 0, sysctl_delayterm, "I", "");
+
+
+/*
+ * kern.rage_vnode handler: reports or changes "rapid aging" of vnodes
+ * for the calling thread (UT_RAGE_VNODES) or process (P_LRAGE_VNODES).
+ * Process flags are mutated under the proc lock; the thread flag is
+ * private to the current uthread and needs no locking.
+ */
+STATIC int
+sysctl_rage_vnode
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+	struct proc *p = req->p;
+	struct uthread *ut;
+	int new_value, old_value, changed;
+	int error;
+
+	ut = get_bsdthread_info(current_thread());
+
+	if (ut->uu_flag & UT_RAGE_VNODES)
+		old_value = KERN_RAGE_THREAD;
+	else if (p->p_lflag & P_LRAGE_VNODES)
+		old_value = KERN_RAGE_PROC;
+	else
+		old_value = 0;
+
+	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
+
+	/*
+	 * Only act when a new value was actually written: on a read-only
+	 * request sysctl_io_number() leaves new_value untouched (and thus
+	 * indeterminate here), so switching on it without checking
+	 * 'changed' could flip rage flags at random.
+	 */
+	if ((error == 0) && changed) {
+		switch (new_value) {
+		case KERN_RAGE_PROC:
+			proc_lock(p);
+			p->p_lflag |= P_LRAGE_VNODES;
+			proc_unlock(p);
+			break;
+		case KERN_UNRAGE_PROC:
+			proc_lock(p);
+			p->p_lflag &= ~P_LRAGE_VNODES;
+			proc_unlock(p);
+			break;
+
+		case KERN_RAGE_THREAD:
+			ut->uu_flag |= UT_RAGE_VNODES;
+			break;
+		case KERN_UNRAGE_THREAD:
+			ut->uu_flag &= ~UT_RAGE_VNODES;
+			break;
+		default:
+			/* unknown values are silently ignored (historical behavior) */
+			break;
+		}
+	}
+	return(error);
+}
+
+SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
+	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
+	0, 0, sysctl_rage_vnode, "I", "");
+
+/* XXX move this interface into libproc and remove this sysctl */
+STATIC int
+sysctl_setthread_cpupercent
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+ int new_value, old_value;
+ int error = 0;
+ kern_return_t kret = KERN_SUCCESS;
+ uint8_t percent = 0;
+ int ms_refill = 0;
+
+ if (!req->newptr)
+ return (0);
+
+ old_value = 0;
+
+ if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0)
+ return (error);
+
+	percent = new_value & 0xff;			/* low 8 bits encode the percent */
+	ms_refill = (new_value >> 8) & 0xffffff;	/* upper 24 bits encode the ms refill value */
+ if (percent > 100)
+ return (EINVAL);