[apple/xnu.git] / bsd / kern / kern_sysctl.c (snapshot xnu-4903.221.2)
index c7978d82e01270e2bf692e3fa9baf59fb9ccb55c..d937e9e4ff02f37eaa4b35e5c08e357e412b4a49 100644 (file)
--- a/bsd/kern/kern_sysctl.c
+++ b/bsd/kern/kern_sysctl.c
 #include <sys/user.h>
 #include <sys/aio_kern.h>
 #include <sys/reboot.h>
+#include <sys/memory_maintenance.h>
+#include <sys/priv.h>
+#include <stdatomic.h>
 
 #include <security/audit/audit.h>
 #include <kern/kalloc.h>
 
+#include <machine/smp.h>
 #include <mach/machine.h>
 #include <mach/mach_host.h>
 #include <mach/mach_types.h>
+#include <mach/processor_info.h>
 #include <mach/vm_param.h>
 #include <kern/mach_param.h>
 #include <kern/task.h>
 #include <kern/thread.h>
+#include <kern/thread_group.h>
 #include <kern/processor.h>
+#include <kern/cpu_number.h>
+#include <kern/cpu_quiesce.h>
 #include <kern/debug.h>
+#include <kern/sched_prim.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_map.h>
 #include <mach/host_info.h>
 
 #include <vm/vm_protos.h>
 #include <vm/vm_pageout.h>
+#include <vm/vm_compressor_algorithms.h>
 #include <sys/imgsrc.h>
 #include <kern/timer_call.h>
 
@@ -162,7 +173,6 @@ extern int lowpri_IO_window_msecs;
 extern int lowpri_IO_delay_msecs;
 extern int nx_enabled;
 extern int speculative_reads_disabled;
-extern int ignore_is_ssd;
 extern unsigned int speculative_prefetch_max;
 extern unsigned int speculative_prefetch_max_iosize;
 extern unsigned int preheat_max_bytes;
@@ -177,8 +187,6 @@ extern unsigned int vm_max_batch;
 extern unsigned int vm_page_free_min;
 extern unsigned int vm_page_free_target;
 extern unsigned int vm_page_free_reserved;
-extern unsigned int vm_page_speculative_percentage;
-extern unsigned int vm_page_speculative_q_age_ms;
 
 #if (DEVELOPMENT || DEBUG)
 extern uint32_t        vm_page_creation_throttled_hard;
@@ -223,8 +231,6 @@ netboot_root(void);
 int
 pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, 
               proc_t p);
-__private_extern__ kern_return_t
-reset_vmobjectcache(unsigned int val1, unsigned int val2);
 int
 sysctl_procargs(int *name, u_int namelen, user_addr_t where, 
                                size_t *sizep, proc_t cur_proc);
@@ -240,9 +246,6 @@ STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
 STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
 STATIC int  sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
 STATIC int  sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
-#if CONFIG_LCTX
-STATIC int  sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg);
-#endif
 int sysdoproc_callback(proc_t p, void *arg);
 
 
@@ -256,7 +259,9 @@ STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
 #if COUNT_SYSCALLS
 STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
 #endif /* COUNT_SYSCALLS */
+#if !CONFIG_EMBEDDED
 STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
+#endif /* !CONFIG_EMBEDDED */
 STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
 STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
 STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
@@ -280,8 +285,10 @@ STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struc
 #endif
 STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
 STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
+#if CONFIG_COREDUMP
 STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
 STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
+#endif
 STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
 STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
 STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
@@ -294,8 +301,16 @@ STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, s
 STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
 STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
 STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
+STATIC int sysctl_minimalboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
 STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
 
+#ifdef CONFIG_XNUPOST
+#include <tests/xnupost.h>
+
+STATIC int sysctl_debug_test_oslog_ctl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
+STATIC int sysctl_debug_test_stackshot_mutex_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
+STATIC int sysctl_debug_test_stackshot_rwlck_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
+#endif
 
 extern void IORegistrySetOSBuildVersion(char * build_version); 
 
@@ -317,12 +332,14 @@ fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
        la32->fscale    = (user32_long_t)la->fscale;
 }
 
+#if CONFIG_COREDUMP
 /*
  * Attributes stored in the kernel.
  */
 extern char corefilename[MAXPATHLEN+1];
 extern int do_coredump;
 extern int sugid_coredump;
+#endif
 
 #if COUNT_SYSCALLS
 extern int do_count_syscalls;
@@ -381,11 +398,16 @@ sysctl_handle_kern_threadname(    __unused struct sysctl_oid *oidp, __unused void *
                        ut->pth_name = (char*)kalloc( MAXTHREADNAMESIZE );
                        if(!ut->pth_name)
                                return ENOMEM;
+               } else {
+                       kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, ut->pth_name);
                }
                bzero(ut->pth_name, MAXTHREADNAMESIZE);
                error = copyin(newp, ut->pth_name, newlen);
-               if(error)
+               if (error) {
                        return error;
+               }
+
+               kernel_debug_string_simple(TRACE_STRING_THREADNAME, ut->pth_name);
        }
                
        return 0;
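
A hedged userspace sketch of what this handler now does on the write path (the sysctl is kern.threadname; the helper name is hypothetical): both the previous and the new name are mirrored into the kdebug trace stream via kernel_debug_string_simple().

```c
#include <stdint.h>
#include <string.h>
#include <sys/sysctl.h>

/* Hypothetical helper: name the calling thread via kern.threadname.
 * With this change the kernel also emits TRACE_STRING_THREADNAME (and
 * TRACE_STRING_THREADNAME_PREV when an old name is replaced). */
static int
set_current_thread_name(const char *name)
{
	/* write-only: newp/newlen carry the name; no old value is read */
	return sysctlbyname("kern.threadname", NULL, NULL,
	    (void *)(uintptr_t)name, strlen(name));
}
```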
@@ -459,12 +481,20 @@ sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1,
 
 SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
 
+extern uint32_t sched_debug_flags;
+SYSCTL_INT(_debug, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_debug_flags, 0, "scheduler debug");
+
+#if (DEBUG || DEVELOPMENT)
+extern boolean_t doprnt_hide_pointers;
+SYSCTL_INT(_debug, OID_AUTO, hide_kernel_pointers, CTLFLAG_RW | CTLFLAG_LOCKED, &doprnt_hide_pointers, 0, "hide kernel pointers from log");
+#endif
+
 extern int get_kernel_symfile(proc_t, char **);
 
 #if COUNT_SYSCALLS
 #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
 
-extern int     nsysent;
+extern unsigned int    nsysent;
 extern int syscalls_log[];
 extern const char *syscallnames[];
 
@@ -662,18 +692,6 @@ sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
                return(1);
 }
 
-#if CONFIG_LCTX
-STATIC int
-sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg)
-{
-       if ((p->p_lctx == NULL) ||
-               (p->p_lctx->lc_id != (pid_t)*(int*)arg))
-               return(0);
-       else
-               return(1);
-}
-#endif
-
 /*
  * try over estimating by 5 procs
  */
@@ -745,6 +763,7 @@ sysctl_prochandle SYSCTL_HANDLER_ARGS
        int uidcheck = 0;
        int ruidcheck = 0;
        int ttycheck = 0;
+       int success = 0;
 
        if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL))
                return (EINVAL);
@@ -779,11 +798,6 @@ sysctl_prochandle SYSCTL_HANDLER_ARGS
                        ruidcheck = 1;
                        break;
 
-#if CONFIG_LCTX
-               case KERN_PROC_LCID:
-                       filterfn = sysdoproc_filt_KERN_PROC_LCID;
-                       break;
-#endif
                case KERN_PROC_ALL:
                        break;
 
@@ -806,8 +820,16 @@ sysctl_prochandle SYSCTL_HANDLER_ARGS
        if (namelen)
                args.uidval = name[0];
 
-       proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
-           sysdoproc_callback, &args, filterfn, name);
+       success = proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
+                     sysdoproc_callback, &args, filterfn, name);
+
+       /*
+        * rdar://problem/28433391: if we can't iterate over the processes,
+        * make sure to return an error.
+        */
+
+       if (success != 0)
+               return (ENOMEM);
 
        if (error)
                return (error);
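
For reference, a minimal sketch of the classic two-call pattern this hunk affects, assuming the usual CTL_KERN/KERN_PROC/KERN_PROC_ALL MIB: a kernel-side proc_iterate() failure now reaches the caller as ENOMEM instead of a silently short result.

```c
#include <stdlib.h>
#include <sys/sysctl.h>

/* Sketch: fetch the process table via kern.proc.all.  A production
 * caller would retry if the table grows between the two calls. */
static struct kinfo_proc *
copy_proc_table(size_t *countp)
{
	int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
	size_t len = 0;

	if (sysctl(mib, 3, NULL, &len, NULL, 0) != 0)	/* size probe */
		return NULL;
	struct kinfo_proc *procs = malloc(len);
	if (procs == NULL)
		return NULL;
	/* can now fail with ENOMEM if the kernel-side iteration failed */
	if (sysctl(mib, 3, procs, &len, NULL, 0) != 0) {
		free(procs);
		return NULL;
	}
	*countp = len / sizeof(*procs);
	return procs;
}
```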
@@ -914,10 +936,6 @@ fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
                if (sessp != SESSION_NULL && sessp->s_ttyvp)
                        ep->e_flag = EPROC_CTTY;
        }
-#if CONFIG_LCTX
-       if (p->p_lctx)
-               ep->e_lcid = p->p_lctx->lc_id;
-#endif
        ep->e_ppid = p->p_ppid;
        if (p->p_ucred) {
                my_cred = kauth_cred_proc_ref(p);
@@ -974,10 +992,6 @@ fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
                if (sessp != SESSION_NULL && sessp->s_ttyvp)
                        ep->e_flag = EPROC_CTTY;
        }
-#if CONFIG_LCTX
-       if (p->p_lctx)
-               ep->e_lcid = p->p_lctx->lc_id;
-#endif
        ep->e_ppid = p->p_ppid;
        if (p->p_ucred) {
                my_cred = kauth_cred_proc_ref(p);
@@ -1137,23 +1151,11 @@ sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
 //     user_addr_t newp = req->newptr; /* user buffer copy in address */
 //     size_t newlen = req->newlen;    /* user buffer copy in size */
 
-       proc_t p = current_proc();
        int ret=0;
 
        if (namelen == 0)
                return(ENOTSUP);
-       
-       ret = suser(kauth_cred_get(), &p->p_acflag);
-#if KPERF
-       /* Non-root processes may be blessed by kperf to access data
-        * logged into trace.
-        */
-       if (ret)
-               ret = kperf_access_check();
-#endif /* KPERF */
-       if (ret)
-               return(ret);
-       
+
        switch(name[0]) {
        case KERN_KDEFLAGS:
        case KERN_KDDFLAGS:
@@ -1164,23 +1166,21 @@ sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
        case KERN_KDSETREG:
        case KERN_KDGETREG:
        case KERN_KDREADTR:
-        case KERN_KDWRITETR:
-        case KERN_KDWRITEMAP:
+       case KERN_KDWRITETR:
+       case KERN_KDWRITEMAP:
+       case KERN_KDTEST:
        case KERN_KDPIDTR:
        case KERN_KDTHRMAP:
        case KERN_KDPIDEX:
-       case KERN_KDSETRTCDEC:
        case KERN_KDSETBUF:
-       case KERN_KDGETENTROPY:
-       case KERN_KDENABLE_BG_TRACE:
-       case KERN_KDDISABLE_BG_TRACE:
        case KERN_KDREADCURTHRMAP:
        case KERN_KDSET_TYPEFILTER:
-        case KERN_KDBUFWAIT:
+       case KERN_KDBUFWAIT:
        case KERN_KDCPUMAP:
-
-               ret = kdbg_control(name, namelen, oldp, oldlenp);
-               break;
+       case KERN_KDWRITEMAP_V3:
+       case KERN_KDWRITETR_V3:
+               ret = kdbg_control(name, namelen, oldp, oldlenp);
+               break;
        default:
                ret= ENOTSUP;
                break;
@@ -1200,6 +1200,7 @@ SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED
        "");
 
 
+#if !CONFIG_EMBEDDED
 /*
  * Return the top *sizep bytes of the user stack, or the entire area of the
  * user stack down through the saved exec_path, whichever is smaller.
@@ -1230,6 +1231,7 @@ SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LO
        sysctl_doprocargs,      /* Handler function */
        NULL,                   /* Data pointer */
        "");
+#endif /* !CONFIG_EMBEDDED */
 
 STATIC int
 sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
@@ -1273,11 +1275,13 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
        caddr_t data;
        size_t argslen=0;
        int size;
+       vm_size_t alloc_size = 0;
        vm_offset_t     copy_start, copy_end;
        kern_return_t ret;
        int pid;
        kauth_cred_t my_cred;
        uid_t uid;
+       int argc = -1;
 
        if ( namelen < 1 )
                return(EINVAL);
@@ -1324,24 +1328,23 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
                        proc_rele(p);
                        return(EFAULT);
                }
-                       
-                size = p->p_argslen;
+
+               size = p->p_argslen;
                proc_rele(p);
-                if (argc_yes) {
-                       size += sizeof(int);
-                }
-                else {
+               if (argc_yes) {
+                       size += sizeof(int);
+               } else {
                        /*
                        * old PROCARGS will return the executable's path plus some
                         * extra space for work alignment and data tags
                         */
-                       size += PATH_MAX + (6 * sizeof(int));
-                }
+                       size += PATH_MAX + (6 * sizeof(int));
+               }
                size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
                *sizep = size;
                return (0);
        }
-       
+
        my_cred = kauth_cred_proc_ref(p);
        uid = kauth_cred_getuid(my_cred);
        kauth_cred_unref(&my_cred);
@@ -1357,7 +1360,6 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
 
        arg_addr = p->user_stack - arg_size;
 
-
        /*
         *      Before we can block (any VM code), make another
         *      reference to the map to keep it alive.  We do
@@ -1368,7 +1370,10 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
                proc_rele(p);
                return(EINVAL);
        }
-       
+
+       /* save off argc before releasing the proc */
+       argc = p->p_argc;
+
        argslen = p->p_argslen;
        /*
         * Once we have a task reference we can convert that into a
@@ -1385,12 +1390,13 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
        if (proc_map == NULL)
                return(EINVAL);
 
-
-       ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
+       alloc_size = round_page(arg_size);
+       ret = kmem_alloc(kernel_map, &copy_start, alloc_size, VM_KERN_MEMORY_BSD);
        if (ret != KERN_SUCCESS) {
                vm_map_deallocate(proc_map);
                return(ENOMEM);
        }
+       bzero((void *)copy_start, alloc_size);
 
        copy_end = round_page(copy_start + arg_size);
 
@@ -1413,6 +1419,7 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
                                  tmp, FALSE) != KERN_SUCCESS) {
                        kmem_free(kernel_map, copy_start,
                                        round_page(arg_size));
+                       vm_map_copy_discard(tmp);
                        return (EIO);
        }
 
@@ -1424,9 +1431,23 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
                size = arg_size;
        }
 
+       /*
+        * When these sysctls were introduced, the first string in the strings
+        * section was just the bare path of the executable.  However, for security
+        * reasons we now prefix this string with executable_path= so it can be
+        * parsed getenv style.  To avoid binary compatability issues with exising
+        * parsed getenv style.  To avoid binary compatibility issues with existing
+        * (rdar://problem/13746466)
+        */
+#define        EXECUTABLE_KEY "executable_path="
+       if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0) {
+               data += strlen(EXECUTABLE_KEY);
+               size -= strlen(EXECUTABLE_KEY);
+       }
+
        if (argc_yes) {
                /* Put processes argc as the first word in the copyout buffer */
-               suword(where, p->p_argc);
+               suword(where, argc);
                error = copyout(data, (where + sizeof(int)), size);
                size += sizeof(int);
        } else {
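
A hedged sketch of consuming kern.procargs2 after the hunk above (the helper name is hypothetical): argc is the first word of the buffer, and the executable path that follows no longer carries the executable_path= prefix.

```c
#include <stdlib.h>
#include <string.h>
#include <sys/sysctl.h>
#include <sys/types.h>

/* Hypothetical helper: argc is the first word of the kern.procargs2
 * buffer (captured before proc_rele() above); the path that follows has
 * the "executable_path=" prefix stripped by the kernel. */
static char *
copy_exec_path(pid_t pid, int *argcp)
{
	int mib[3] = { CTL_KERN, KERN_PROCARGS2, (int)pid };
	size_t len = 0;

	if (sysctl(mib, 3, NULL, &len, NULL, 0) != 0)	/* size probe */
		return NULL;
	char *buf = malloc(len);
	if (buf == NULL || sysctl(mib, 3, buf, &len, NULL, 0) != 0) {
		free(buf);	/* free(NULL) is harmless */
		return NULL;
	}
	memcpy(argcp, buf, sizeof(int));	/* argc first */
	char *path = strdup(buf + sizeof(int));	/* then the bare path */
	free(buf);
	return path;
}
```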
@@ -1610,7 +1631,17 @@ SYSCTL_STRING(_kern, OID_AUTO, uuid,
                CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 
                &kernel_uuid_string[0], 0, "");
 
+SYSCTL_STRING(_kern, OID_AUTO, osbuildconfig,
+               CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_MASKED,
+               &osbuild_config[0], 0, "");
+
 #if DEBUG
+#ifndef DKPR
+#define DKPR 1
+#endif
+#endif
+
+#if DKPR
 int debug_kprint_syscall = 0;
 char debug_kprint_syscall_process[MAXCOMLEN+1];
 
@@ -1673,15 +1704,65 @@ SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
         osversion, 256 /* OSVERSIZE*/, 
         sysctl_osversion, "A", "");
 
+static char osproductversion_string[48];
+
+STATIC int
+sysctl_osproductversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
+{
+       if (req->newptr != 0) {
+               /*
+                * Can only ever be set by launchd, and only once at boot.
+                */
+               if (req->p->p_pid != 1 || osproductversion_string[0] != '\0') {
+                       return EPERM;
+               }
+       }
+
+	return sysctl_handle_string(oidp, arg1, arg2, req);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, osproductversion,
+        CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
+        osproductversion_string, sizeof(osproductversion_string),
+        sysctl_osproductversion, "A", "The ProductVersion from SystemVersion.plist");
+
+static uint64_t osvariant_status = 0;
+
+STATIC int
+sysctl_osvariant_status(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
+{
+       if (req->newptr != 0) {
+               /*
+                * Can only ever be set by launchd, and only once at boot.
+                */
+               if (req->p->p_pid != 1 || osvariant_status != 0) {
+                       return EPERM;
+               }
+       }
+
+	return sysctl_handle_quad(oidp, arg1, arg2, req);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, osvariant_status,
+        CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED,
+        &osvariant_status, sizeof(osvariant_status),
+        sysctl_osvariant_status, "Q", "Opaque flags used to cache OS variant information");
+
 STATIC int
 sysctl_sysctl_bootargs
 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
 {
        int error;
-       char buf[256];
+       /* BOOT_LINE_LENGTH */
+#if CONFIG_EMBEDDED
+       size_t boot_args_len = 256;
+#else
+       size_t boot_args_len = 1024;
+#endif
+       char buf[boot_args_len];
 
-       strlcpy(buf, PE_boot_args(), 256);
-       error = sysctl_io_string(req, buf, 256, 0, NULL);
+       strlcpy(buf, PE_boot_args(), boot_args_len);
+       error = sysctl_io_string(req, buf, boot_args_len, 0, NULL);
        return(error);
 }
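
A minimal sketch of the consumer side, assuming sysctlbyname(3): kern.bootargs is read-only, and the kernel-side staging buffer is now 1024 bytes on macOS (256 with CONFIG_EMBEDDED), so userspace buffers should be sized to match.

```c
#include <stddef.h>
#include <sys/sysctl.h>

/* Sketch: copy the kernel boot arguments into a caller-supplied buffer. */
static int
get_boot_args(char *buf, size_t buflen)
{
	return sysctlbyname("kern.bootargs", buf, &buflen, NULL, 0);
}
```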
 
@@ -1690,6 +1771,21 @@ SYSCTL_PROC(_kern, OID_AUTO, bootargs,
        NULL, 0,
        sysctl_sysctl_bootargs, "A", "bootargs");
 
+STATIC int
+sysctl_kernelcacheuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
+{
+	int rval = ENOENT;
+	if (kernelcache_uuid_valid) {
+		rval = sysctl_handle_string(oidp, arg1, arg2, req);
+	}
+	return rval;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, kernelcacheuuid,
+        CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
+        kernelcache_uuid_string, sizeof(kernelcache_uuid_string),
+        sysctl_kernelcacheuuid, "A", "");
+
 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, 
                CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
                &maxfiles, 0, "");
@@ -1737,7 +1833,6 @@ sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unuse
        int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
 
        if (oldval != desiredvnodes) {
-               reset_vmobjectcache(oldval, desiredvnodes);
                resize_namecache(desiredvnodes);
        }
 
@@ -1773,7 +1868,75 @@ extern int sched_smt_balance;
 SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance, 
                CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED, 
                &sched_smt_balance, 0, "");
-#endif
+extern int sched_allow_rt_smt;
+SYSCTL_INT(_kern, OID_AUTO, sched_allow_rt_smt, 
+               CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED, 
+               &sched_allow_rt_smt, 0, "");
+#if __arm__ || __arm64__
+extern uint32_t perfcontrol_requested_recommended_cores;
+SYSCTL_UINT(_kern, OID_AUTO, sched_recommended_cores,
+               CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
+               &perfcontrol_requested_recommended_cores, 0, "");
+
+/* Scheduler perfcontrol callouts sysctls */
+SYSCTL_DECL(_kern_perfcontrol_callout);
+SYSCTL_NODE(_kern, OID_AUTO, perfcontrol_callout, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
+       "scheduler perfcontrol callouts");
+
+extern int perfcontrol_callout_stats_enabled;
+SYSCTL_INT(_kern_perfcontrol_callout, OID_AUTO, stats_enabled, 
+               CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED, 
+               &perfcontrol_callout_stats_enabled, 0, "");
+
+extern uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
+       perfcontrol_callout_stat_t stat);
+
+/* On-Core Callout */
+STATIC int
+sysctl_perfcontrol_callout_stat
+(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
+{
+       perfcontrol_callout_stat_t stat = (perfcontrol_callout_stat_t)arg1;
+       perfcontrol_callout_type_t type = (perfcontrol_callout_type_t)arg2;
+       return sysctl_io_number(req, (int)perfcontrol_callout_stat_avg(type, stat),
+               sizeof(int), NULL, NULL);
+}
+
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_instr,
+               CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+               (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_ON_CORE,
+               sysctl_perfcontrol_callout_stat, "I", "");
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_cycles,
+               CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+               (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_ON_CORE,
+               sysctl_perfcontrol_callout_stat, "I", "");
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_instr,
+               CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+               (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_OFF_CORE,
+               sysctl_perfcontrol_callout_stat, "I", "");
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_cycles,
+               CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+               (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_OFF_CORE,
+               sysctl_perfcontrol_callout_stat, "I", "");
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_instr,
+               CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+               (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_CONTEXT,
+               sysctl_perfcontrol_callout_stat, "I", "");
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_cycles,
+               CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+               (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_CONTEXT,
+               sysctl_perfcontrol_callout_stat, "I", "");
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_instr,
+               CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+               (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_STATE_UPDATE,
+               sysctl_perfcontrol_callout_stat, "I", "");
+SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_cycles,
+               CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+               (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_STATE_UPDATE,
+               sysctl_perfcontrol_callout_stat, "I", "");
+
+#endif /* __arm__ || __arm64__ */
+#endif /* (DEVELOPMENT || DEBUG) */
 
 STATIC int
 sysctl_securelvl
@@ -1852,10 +2015,6 @@ SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
                CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
                &speculative_reads_disabled, 0, "");
 
-SYSCTL_INT(_kern, OID_AUTO, ignore_is_ssd, 
-               CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
-               &ignore_is_ssd, 0, "");
-
 SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes, 
                CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
                &preheat_max_bytes, 0, "");
@@ -1886,11 +2045,11 @@ SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
 
 SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
                CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
-               &vm_page_speculative_percentage, 0, "");
+               &vm_pageout_state.vm_page_speculative_percentage, 0, "");
 
 SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
                CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
-               &vm_page_speculative_q_age_ms, 0, "");
+               &vm_pageout_state.vm_page_speculative_q_age_ms, 0, "");
 
 SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
                CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
@@ -1908,18 +2067,19 @@ STATIC int
 sysctl_boottime
 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
 {
-       time_t tv_sec = boottime_sec();
+       struct timeval tv;
+       boottime_timeval(&tv);
        struct proc *p = req->p;
 
        if (proc_is64bit(p)) {
-               struct user64_timeval t;
-               t.tv_sec = tv_sec;
-               t.tv_usec = 0;
+               struct user64_timeval t = {};
+               t.tv_sec = tv.tv_sec;
+               t.tv_usec = tv.tv_usec;
                return sysctl_io_opaque(req, &t, sizeof(t), NULL);
        } else {
-               struct user32_timeval t;
-               t.tv_sec = tv_sec;
-               t.tv_usec = 0;
+               struct user32_timeval t = {};
+               t.tv_sec = tv.tv_sec;
+               t.tv_usec = tv.tv_usec;
                return sysctl_io_opaque(req, &t, sizeof(t), NULL);
        }
 }
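
A minimal sketch of the consumer side: kern.boottime now reports real microseconds in tv_usec rather than always zero.

```c
#include <sys/sysctl.h>
#include <sys/time.h>

/* Sketch: read the boot time; tv_usec is now meaningful. */
static int
get_boottime(struct timeval *tv)
{
	int mib[2] = { CTL_KERN, KERN_BOOTTIME };
	size_t len = sizeof(*tv);

	return sysctl(mib, 2, tv, &len, NULL, 0);
}
```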
@@ -2005,7 +2165,7 @@ sysctl_imgsrcinfo
 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
 {
        int error;
-       struct imgsrc_info info[MAX_IMAGEBOOT_NESTING]; /* 2 for now, no problem */
+       struct imgsrc_info info[MAX_IMAGEBOOT_NESTING] = {};    /* 2 for now, no problem */
        uint32_t i;
        vnode_t rvp, devvp;
 
@@ -2087,7 +2247,7 @@ SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "lo
 enum {
        THRESHOLD, QCOUNT,
        ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
-       LATENCY, LATENCY_MIN, LATENCY_MAX
+       LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, SCAN_INTERVAL, PAUSES
 };
 extern uint64_t        timer_sysctl_get(int);
 extern int      timer_sysctl_set(int, uint64_t);
@@ -2112,10 +2272,21 @@ sysctl_timer
 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
                CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
                (void *) THRESHOLD, 0, sysctl_timer, "Q", "");
+SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_limit,
+               CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+               (void *) SCAN_LIMIT, 0, sysctl_timer, "Q", "");
+SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_interval,
+               CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+               (void *) SCAN_INTERVAL, 0, sysctl_timer, "Q", "");
+
 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
                CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
                (void *) QCOUNT, 0, sysctl_timer, "Q", "");
-#if DEBUG
+SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_pauses,
+               CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+               (void *) PAUSES, 0, sysctl_timer, "Q", "");
+
+#if  DEBUG
 SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
                CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
                (void *) ENQUEUES, 0, sysctl_timer, "Q", "");
@@ -2164,6 +2335,8 @@ SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
                CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
                0, 0, sysctl_usrstack64, "Q", "");
 
+#if CONFIG_COREDUMP
+
 SYSCTL_STRING(_kern, KERN_COREFILE, corefile, 
                CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
                corefilename, sizeof(corefilename), "");
@@ -2216,6 +2389,8 @@ SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
                CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
                0, 0, sysctl_suid_coredump, "I", "");
 
+#endif /* CONFIG_COREDUMP */
+
 STATIC int
 sysctl_delayterm
 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
@@ -2404,11 +2579,11 @@ sysctl_loadavg
 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
 {
                if (proc_is64bit(req->p)) {
-                       struct user64_loadavg loadinfo64;
+                       struct user64_loadavg loadinfo64 = {};
                        fill_loadavg64(&averunnable, &loadinfo64);
                        return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
                } else {
-                       struct user32_loadavg loadinfo32;
+                       struct user32_loadavg loadinfo32 = {};
                        fill_loadavg32(&averunnable, &loadinfo32);
                        return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
                }
@@ -2438,6 +2613,323 @@ sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *
 
 SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse,"I","");
 
+#ifdef CONFIG_XNUPOST
+
+extern int xnupost_export_testdata(void *outp, uint32_t size, uint32_t *lenp);
+extern uint32_t xnupost_get_estimated_testdata_size(void);
+
+extern int xnupost_reset_all_tests(void);
+
+STATIC int
+sysctl_handle_xnupost_get_tests SYSCTL_HANDLER_ARGS
+{
+       /* fixup unused arguments warnings */
+       __unused int _oa2                  = arg2;
+       __unused void * _oa1               = arg1;
+       __unused struct sysctl_oid * _oidp = oidp;
+
+       int error          = 0;
+       user_addr_t oldp   = 0;
+       user_addr_t newp   = 0;
+       uint32_t usedbytes = 0;
+
+       oldp = req->oldptr;
+       newp = req->newptr;
+
+       if (newp)
+               return ENOTSUP;
+
+       if ((void *)oldp == NULL) {
+               /* return estimated size for second call where info can be placed */
+               req->oldidx = xnupost_get_estimated_testdata_size();
+       } else {
+               error       = xnupost_export_testdata((void *)oldp, req->oldlen, &usedbytes);
+               req->oldidx = usedbytes;
+       }
+
+       return error;
+}
+
+SYSCTL_PROC(_debug,
+            OID_AUTO,
+            xnupost_get_tests,
+            CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
+            0,
+            0,
+            sysctl_handle_xnupost_get_tests,
+            "-",
+            "read xnupost test data in kernel");
+
+STATIC int
+sysctl_debug_xnupost_ctl SYSCTL_HANDLER_ARGS
+{
+       /* fixup unused arguments warnings */
+       __unused int _oa2                  = arg2;
+       __unused void * _oa1               = arg1;
+       __unused struct sysctl_oid * _oidp = oidp;
+
+#define ARRCOUNT 4
+       /*
+        * INPUT: ACTION,  PARAM1, PARAM2, PARAM3
+        * OUTPUT: RESULTCODE, ADDITIONAL DATA
+        */
+       int32_t outval[ARRCOUNT] = {0};
+       int32_t input[ARRCOUNT]  = {0};
+       int32_t out_size         = sizeof(outval);
+       int32_t in_size          = sizeof(input);
+       int error                = 0;
+
+       /* if this is NULL call to find out size, send out size info */
+       if (!req->newptr) {
+               goto out;
+       }
+
+       /* pull in provided value from userspace */
+       error = SYSCTL_IN(req, &input[0], in_size);
+       if (error)
+               return error;
+
+       if (input[0] == XTCTL_RESET_TESTDATA) {
+               outval[0] = xnupost_reset_all_tests();
+               goto out;
+       }
+
+out:
+       error = SYSCTL_OUT(req, &outval[0], out_size);
+       return error;
+}
+
+SYSCTL_PROC(_debug,
+            OID_AUTO,
+            xnupost_testctl,
+            CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
+            0,
+            0,
+            sysctl_debug_xnupost_ctl,
+            "I",
+            "xnupost control for kernel testing");
+
+extern void test_oslog_handleOSLogCtl(int32_t * in, int32_t * out, int32_t arraycount);
+
+STATIC int
+sysctl_debug_test_oslog_ctl(__unused struct sysctl_oid * oidp, __unused void * arg1, __unused int arg2, struct sysctl_req * req)
+{
+#define ARRCOUNT 4
+       int32_t outval[ARRCOUNT] = {0};
+       int32_t input[ARRCOUNT]  = {0};
+       int32_t size_outval      = sizeof(outval);
+       int32_t size_inval       = sizeof(input);
+       int32_t error;
+
+       /* if this is NULL call to find out size, send out size info */
+       if (!req->newptr) {
+               error = SYSCTL_OUT(req, &outval[0], size_outval);
+               return error;
+       }
+
+       /* pull in provided value from userspace */
+       error = SYSCTL_IN(req, &input[0], size_inval);
+       if (error)
+               return error;
+
+       test_oslog_handleOSLogCtl(input, outval, ARRCOUNT);
+
+       error = SYSCTL_OUT(req, &outval[0], size_outval);
+
+       return error;
+}
+
+SYSCTL_PROC(_debug,
+            OID_AUTO,
+            test_OSLogCtl,
+            CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED,
+            0,
+            0,
+            sysctl_debug_test_oslog_ctl,
+            "I",
+            "testing oslog in kernel");
+
+#include <mach/task.h>
+#include <mach/semaphore.h>
+
+extern lck_grp_t * sysctl_debug_test_stackshot_owner_grp; /* used for both mutexes and rwlocks */
+extern lck_mtx_t * sysctl_debug_test_stackshot_owner_init_mtx; /* used to protect lck_*_init */
+
+/* This is a sysctl for testing collection of owner info on a lock in kernel space. A multi-threaded
+ * test from userland sets this sysctl in such a way that a thread blocks in kernel mode, and a
+ * stackshot is taken to see if the owner of the lock can be identified.
+ *
+ * We can't return to userland with a kernel lock held, so be sure to unlock before we leave.
+ * The semaphores allow us to artificially create cases where the lock is being held and the
+ * thread is hanging / taking a long time to do something. */
+
+volatile char      sysctl_debug_test_stackshot_mtx_inited = 0;
+semaphore_t        sysctl_debug_test_stackshot_mutex_sem;
+lck_mtx_t          sysctl_debug_test_stackshot_owner_lck;
+
+#define SYSCTL_DEBUG_MTX_ACQUIRE_WAIT   1
+#define SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT 2
+#define SYSCTL_DEBUG_MTX_SIGNAL         3
+#define SYSCTL_DEBUG_MTX_TEARDOWN       4
+
+STATIC int
+sysctl_debug_test_stackshot_mutex_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+       long long option = -1;
+       /* if the user tries to read the sysctl, we tell them what the address of the lock is (to test against stackshot's output) */
+       long long mtx_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_lck);
+       int error = sysctl_io_number(req, mtx_unslid_addr, sizeof(long long), (void*)&option, NULL);
+
+       lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);
+       if (!sysctl_debug_test_stackshot_mtx_inited) {
+               lck_mtx_init(&sysctl_debug_test_stackshot_owner_lck,
+                               sysctl_debug_test_stackshot_owner_grp,
+                               LCK_ATTR_NULL);
+               semaphore_create(kernel_task,
+                               &sysctl_debug_test_stackshot_mutex_sem,
+                               SYNC_POLICY_FIFO, 0);
+               sysctl_debug_test_stackshot_mtx_inited = 1;
+       }
+       lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);
+
+       if (!error) {
+               switch(option) {
+                       case SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT:
+                               lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck);
+                               lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck);
+                               break;
+                       case SYSCTL_DEBUG_MTX_ACQUIRE_WAIT:
+                               lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck);
+                               semaphore_wait(sysctl_debug_test_stackshot_mutex_sem);
+                               lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck);
+                               break;
+                       case SYSCTL_DEBUG_MTX_SIGNAL:
+                               semaphore_signal(sysctl_debug_test_stackshot_mutex_sem);
+                               break;
+                       case SYSCTL_DEBUG_MTX_TEARDOWN:
+                               lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);
+
+                               lck_mtx_destroy(&sysctl_debug_test_stackshot_owner_lck,
+                                               sysctl_debug_test_stackshot_owner_grp);
+                               semaphore_destroy(kernel_task,
+                                               sysctl_debug_test_stackshot_mutex_sem);
+                               sysctl_debug_test_stackshot_mtx_inited = 0;
+
+                               lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);
+                               break;
+                       case -1: /* user just wanted to read the value, so do nothing */
+                               break;
+                       default:
+                               error = EINVAL;
+                               break;
+               }
+       }
+       return error;
+}
+
+/* We can't return to userland with a kernel rwlock held, so be sure to unlock before we leave.
+ * The semaphores allow us to artificially create cases where the lock is being held and the
+ * thread is hanging / taking a long time to do something. */
+
+SYSCTL_PROC(_debug,
+            OID_AUTO,
+            test_MutexOwnerCtl,
+            CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+            0,
+            0,
+            sysctl_debug_test_stackshot_mutex_owner,
+            "-",
+            "Testing mutex owner in kernel");
+
+volatile char sysctl_debug_test_stackshot_rwlck_inited = 0;
+lck_rw_t      sysctl_debug_test_stackshot_owner_rwlck;
+semaphore_t   sysctl_debug_test_stackshot_rwlck_sem;
+
+#define SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT 1
+#define SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT   2
+#define SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT 3
+#define SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT   4
+#define SYSCTL_DEBUG_KRWLCK_SIGNAL          5
+#define SYSCTL_DEBUG_KRWLCK_TEARDOWN        6
+
+STATIC int
+sysctl_debug_test_stackshot_rwlck_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+       long long option = -1;
+       /* if the user tries to read the sysctl, we tell them what the address of the lock is 
+        * (to test against stackshot's output) */
+       long long rwlck_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_rwlck);
+       int error = sysctl_io_number(req, rwlck_unslid_addr, sizeof(long long), (void*)&option, NULL);
+
+       lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);
+       if (!sysctl_debug_test_stackshot_rwlck_inited) {
+               lck_rw_init(&sysctl_debug_test_stackshot_owner_rwlck,
+                               sysctl_debug_test_stackshot_owner_grp,
+                               LCK_ATTR_NULL);
+               semaphore_create(kernel_task,
+                               &sysctl_debug_test_stackshot_rwlck_sem,
+                               SYNC_POLICY_FIFO,
+                               0);
+               sysctl_debug_test_stackshot_rwlck_inited = 1;
+       }
+       lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);
+
+       if (!error) {
+               switch(option) {
+                       case SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT:
+                               lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
+                               lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
+                               break;
+                       case SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT:
+                               lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
+                               semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem);
+                               lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED);
+                               break;
+                       case SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT:
+                               lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
+                               lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
+                               break;
+                       case SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT:
+                               lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
+                               semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem);
+                               lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE);
+                               break;
+                       case SYSCTL_DEBUG_KRWLCK_SIGNAL:
+                               semaphore_signal(sysctl_debug_test_stackshot_rwlck_sem);
+                               break;
+                       case SYSCTL_DEBUG_KRWLCK_TEARDOWN:
+                               lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx);
+
+                               lck_rw_destroy(&sysctl_debug_test_stackshot_owner_rwlck,
+                                               sysctl_debug_test_stackshot_owner_grp);
+                               semaphore_destroy(kernel_task,
+                                               sysctl_debug_test_stackshot_rwlck_sem);
+                               sysctl_debug_test_stackshot_rwlck_inited = 0;
+
+                               lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx);
+                               break;
+                       case -1: /* user just wanted to read the value, so do nothing */
+                               break;
+                       default:
+                               error = EINVAL;
+                               break;
+               }
+       }
+       return error;
+}
+
+
+SYSCTL_PROC(_debug,
+            OID_AUTO,
+            test_RWLockOwnerCtl,
+            CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+            0,
+            0,
+            sysctl_debug_test_stackshot_rwlck_owner,
+            "-",
+            "Testing rwlock owner in kernel");
+#endif /* CONFIG_XNUPOST */
+
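
A hedged sketch (helper name hypothetical) of how a userland test might drive debug.test_MutexOwnerCtl using the option codes defined above: a read returns the unslid lock address to match against stackshot output, and a write of SYSCTL_DEBUG_MTX_ACQUIRE_WAIT blocks while holding the mutex until a second thread writes SYSCTL_DEBUG_MTX_SIGNAL.

```c
#include <stddef.h>
#include <sys/sysctl.h>

#define SYSCTL_DEBUG_MTX_ACQUIRE_WAIT	1LL	/* mirrors the kernel define */

/* Blocks in the kernel holding the test mutex; returns the unslid
 * address of the lock for comparison against stackshot output. */
static long long
acquire_test_mutex_and_wait(void)
{
	long long addr = 0;
	long long opt = SYSCTL_DEBUG_MTX_ACQUIRE_WAIT;
	size_t len = sizeof(addr);

	sysctlbyname("debug.test_MutexOwnerCtl", &addr, &len, &opt, sizeof(opt));
	return addr;
}
```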
 STATIC int
 sysctl_swapusage
 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
@@ -2447,7 +2939,7 @@ sysctl_swapusage
                uint64_t                swap_avail;
                vm_size_t               swap_pagesize;
                boolean_t               swap_encrypted;
-               struct xsw_usage        xsu;
+               struct xsw_usage        xsu = {};
 
                error = macx_swapinfo(&swap_total,
                                      &swap_avail,
@@ -2472,6 +2964,7 @@ SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
 
 #if CONFIG_FREEZE
 extern void vm_page_reactivate_all_throttled(void);
+extern void memorystatus_disable_freeze(void);
 
 static int
 sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
@@ -2483,8 +2976,8 @@ sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error || !req->newptr)
                return (error);
-       
-       if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+
+       if (! VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
                //assert(req->newptr);
                printf("Failed attempt to set vm.freeze_enabled sysctl\n");
                return EINVAL;
@@ -2499,14 +2992,62 @@ sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
        
        if (disabled) {
                vm_page_reactivate_all_throttled();
+               memorystatus_disable_freeze();
        }
        
        return (0);
 }
 
-SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
+SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
 #endif /* CONFIG_FREEZE */
 
+#if DEVELOPMENT || DEBUG
+extern int vm_num_swap_files_config;
+extern int vm_num_swap_files;
+extern lck_mtx_t vm_swap_data_lock;
+#define VM_MAX_SWAP_FILE_NUM           100
+
+static int
+sysctl_vm_config_num_swap_files SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+       int error = 0, val = vm_num_swap_files_config;
+
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || !req->newptr) {
+               goto out;
+       }
+
+       if (!VM_CONFIG_SWAP_IS_ACTIVE && !VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
+               printf("Swap is disabled\n");
+               error = EINVAL;
+               goto out;
+       }
+
+       lck_mtx_lock(&vm_swap_data_lock);
+
+       if (val < vm_num_swap_files) {
+               printf("Cannot configure fewer swap files than already exist.\n");
+               error = EINVAL;
+               lck_mtx_unlock(&vm_swap_data_lock);
+               goto out;
+       }
+
+       if (val > VM_MAX_SWAP_FILE_NUM) {
+               printf("Capping number of swap files to upper bound.\n");
+               val = VM_MAX_SWAP_FILE_NUM;
+       }
+
+       vm_num_swap_files_config = val;
+       lck_mtx_unlock(&vm_swap_data_lock);
+out:
+	return (error);
+}
+
+SYSCTL_PROC(_debug, OID_AUTO, num_swap_files_configured, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_config_num_swap_files, "I", "");
+#endif /* DEVELOPMENT || DEBUG */
+
 /* this kernel does NOT implement shared_region_make_private_np() */
 SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private, 
                CTLFLAG_RD | CTLFLAG_LOCKED, 
@@ -2537,8 +3078,9 @@ fetch_process_cputype(
        }
 
        ret = cpu_type() & ~CPU_ARCH_MASK;
-       if (IS_64BIT_PROCESS(p))
+       if (IS_64BIT_PROCESS(p)) {
                ret |= CPU_ARCH_ABI64;
+       }
 
        *cputype = ret;
        
@@ -2597,6 +3139,16 @@ SYSCTL_PROC(_kern, OID_AUTO, singleuser,
                CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
                0, 0, sysctl_singleuser, "I", "");
 
+STATIC int sysctl_minimalboot
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+       return sysctl_io_number(req, minimalboot, sizeof(int), NULL, NULL);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, minimalboot,
+               CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+               0, 0, sysctl_minimalboot, "I", "");
+
 /*
  * Controls for debugging affinity sets - see osfmk/kern/affinity.c
  */
@@ -2646,9 +3198,15 @@ vm_map_size_t    vm_user_wire_limit;
 /*
  * There needs to be a more automatic/elegant way to do this
  */
+#if defined(__ARM__)
+SYSCTL_INT(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, 0, "");
+#else
 SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
 SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
 SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
+#endif
 
 extern int vm_map_copy_overwrite_aligned_src_not_internal;
 extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
@@ -2659,62 +3217,343 @@ SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_ma
 
 
 extern uint32_t        vm_page_external_count;
-extern uint32_t        vm_page_filecache_min;
 
 SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");
-SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_filecache_min, 0, "");
+
+SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min, 0, "");
+
+#if DEVELOPMENT || DEBUG
+SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min_divisor, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min_divisor, 0, "");
+#endif
 
 extern int     vm_compressor_mode;
 extern int     vm_compressor_is_active;
+extern int     vm_compressor_available;
+extern uint32_t        vm_ripe_target_age;
 extern uint32_t        swapout_target_age;
 extern int64_t  compressor_bytes_used;
+extern int64_t  c_segment_input_bytes;
+extern int64_t  c_segment_compressed_bytes;
 extern uint32_t        compressor_eval_period_in_msecs;
 extern uint32_t        compressor_sample_min_in_msecs;
 extern uint32_t        compressor_sample_max_in_msecs;
 extern uint32_t        compressor_thrashing_threshold_per_10msecs;
 extern uint32_t        compressor_thrashing_min_per_10msecs;
+extern uint32_t vm_compressor_time_thread;
+
+#if DEVELOPMENT || DEBUG
 extern uint32_t        vm_compressor_minorcompact_threshold_divisor;
 extern uint32_t        vm_compressor_majorcompact_threshold_divisor;
 extern uint32_t        vm_compressor_unthrottle_threshold_divisor;
 extern uint32_t        vm_compressor_catchup_threshold_divisor;
 
-SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
-SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
-SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");
-SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
+extern uint32_t        vm_compressor_minorcompact_threshold_divisor_overridden;
+extern uint32_t        vm_compressor_majorcompact_threshold_divisor_overridden;
+extern uint32_t        vm_compressor_unthrottle_threshold_divisor_overridden;
+extern uint32_t        vm_compressor_catchup_threshold_divisor_overridden;
 
-SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
-SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
-SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
-SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
-SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");
-SYSCTL_INT(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_minorcompact_threshold_divisor, 0, "");
-SYSCTL_INT(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_majorcompact_threshold_divisor, 0, "");
-SYSCTL_INT(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_unthrottle_threshold_divisor, 0, "");
-SYSCTL_INT(_vm, OID_AUTO, compressor_catchup_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_catchup_threshold_divisor, 0, "");
+extern vmct_stats_t vmct_stats;
 
-SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");
 
-#if CONFIG_PHANTOM_CACHE
-extern uint32_t phantom_cache_thrashing_threshold;
-extern uint32_t phantom_cache_eval_period_in_msecs;
-extern uint32_t phantom_cache_thrashing_threshold_ssd;
+STATIC int
+sysctl_minorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+       int new_value, changed;
+       int error = sysctl_io_number(req, vm_compressor_minorcompact_threshold_divisor, sizeof(int), &new_value, &changed);
 
+       if (changed) {
+               vm_compressor_minorcompact_threshold_divisor = new_value;
+               vm_compressor_minorcompact_threshold_divisor_overridden = 1;
+       }
+       return(error);
+}
 
-SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
+SYSCTL_PROC(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor,
+           CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
+           0, 0, sysctl_minorcompact_threshold_divisor, "I", "");
+
+
+STATIC int
+sysctl_majorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+       int new_value, changed;
+       int error = sysctl_io_number(req, vm_compressor_majorcompact_threshold_divisor, sizeof(int), &new_value, &changed);
+
+       if (changed) {
+               vm_compressor_majorcompact_threshold_divisor = new_value;
+               vm_compressor_majorcompact_threshold_divisor_overridden = 1;
+       }
+       return(error);
+}
+
+SYSCTL_PROC(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor,
+           CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
+           0, 0, sysctl_majorcompact_threshold_divisor, "I", "");
+
+
+STATIC int
+sysctl_unthrottle_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+       int new_value, changed;
+       int error = sysctl_io_number(req, vm_compressor_unthrottle_threshold_divisor, sizeof(int), &new_value, &changed);
+
+       if (changed) {
+               vm_compressor_unthrottle_threshold_divisor = new_value;
+               vm_compressor_unthrottle_threshold_divisor_overridden = 1;
+       }
+       return(error);
+}
+
+SYSCTL_PROC(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor,
+           CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
+           0, 0, sysctl_unthrottle_threshold_divisor, "I", "");
+
+
+STATIC int
+sysctl_catchup_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+       int new_value, changed;
+       int error = sysctl_io_number(req, vm_compressor_catchup_threshold_divisor, sizeof(int), &new_value, &changed);
+
+       if (changed) {
+               vm_compressor_catchup_threshold_divisor = new_value;
+               vm_compressor_catchup_threshold_divisor_overridden = 1;
+       }
+       return(error);
+}
+
+SYSCTL_PROC(_vm, OID_AUTO, compressor_catchup_threshold_divisor,
+           CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
+           0, 0, sysctl_catchup_threshold_divisor, "I", "");
+#endif
+
+
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_input_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_input_bytes, "");
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &c_segment_compressed_bytes, "");
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");
+
+SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_is_active, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_is_active, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_available, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_available, 0, "");
+
+SYSCTL_INT(_vm, OID_AUTO, vm_ripe_target_age_in_secs, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ripe_target_age, 0, "");
+
+SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");
+
+SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");
+
+SYSCTL_INT(_vm, OID_AUTO, compressor_timing_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_time_thread, 0, "");
+
+#if DEVELOPMENT || DEBUG
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[0], "");
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_runtime1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_runtimes[1], "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_threads_total, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_cthreads_total, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[0], "");
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_pages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_pages[1], "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[0], "");
+SYSCTL_QUAD(_vm, OID_AUTO, compressor_thread_iterations1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_iterations[1], "");
+
+SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[0], 0, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_minpages[1], 0, "");
+
+SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[0], 0, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[1], 0, "");
+
+#endif /* DEVELOPMENT || DEBUG */
+
+SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressions, "");
+SYSCTL_QUAD(_vm, OID_AUTO, lz4_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compression_failures, "");
+SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressed_bytes, "");
+SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_delta, "");
+SYSCTL_QUAD(_vm, OID_AUTO, lz4_wk_compression_negative_delta, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_wk_compression_negative_delta, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressions, "");
+SYSCTL_QUAD(_vm, OID_AUTO, lz4_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_decompressed_bytes, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, uc_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.uc_decompressions, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wk_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_cabstime, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wkh_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_cabstime, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wkh_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_compressions, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wks_catime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_cabstime, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wks_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressions, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wk_compressions_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressions_exclusive, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_compressions, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wk_mzv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_mzv_compressions, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wk_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compression_failures, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_exclusive, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_exclusive, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wk_compressed_bytes_total, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_compressed_bytes_total, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wks_compressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compressed_bytes, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wks_compression_failures, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_compression_failures, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wks_sv_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_sv_compressions, "");
+
+
+SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressions, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wk_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_dabstime, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wkh_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_dabstime, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wkh_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wkh_decompressions, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wks_datime, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_dabstime, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wks_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wks_decompressions, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, wk_decompressed_bytes, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_decompressed_bytes, "");
+SYSCTL_QUAD(_vm, OID_AUTO, wk_sv_decompressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.wk_sv_decompressions, "");
+
+SYSCTL_INT(_vm, OID_AUTO, lz4_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_threshold, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, wkdm_reeval_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.wkdm_reeval_threshold, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_skips, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_skips, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, lz4_max_failure_run_length, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_failure_run_length, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, lz4_max_preselects, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_max_preselects, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, lz4_run_preselection_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_preselection_threshold, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, lz4_run_continue_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_run_continue_bytes, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, lz4_profitable_bytes, CTLFLAG_RW | CTLFLAG_LOCKED, &vmctune.lz4_profitable_bytes, 0, "");
+#if DEVELOPMENT || DEBUG
+extern int vm_compressor_current_codec;
+extern int vm_compressor_test_seg_wp;
+extern boolean_t vm_compressor_force_sw_wkdm;
+SYSCTL_INT(_vm, OID_AUTO, compressor_codec, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_current_codec, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, compressor_test_wp, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_test_seg_wp, 0, "");
+
+SYSCTL_INT(_vm, OID_AUTO, wksw_force, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_force_sw_wkdm, 0, "");
+extern int precompy, wkswhw;
+
+SYSCTL_INT(_vm, OID_AUTO, precompy, CTLFLAG_RW | CTLFLAG_LOCKED, &precompy, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, wkswhw, CTLFLAG_RW | CTLFLAG_LOCKED, &wkswhw, 0, "");
+extern unsigned int vm_ktrace_enabled;
+SYSCTL_INT(_vm, OID_AUTO, vm_ktrace, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ktrace_enabled, 0, "");
+#endif /* DEVELOPMENT || DEBUG */
+
+#if CONFIG_PHANTOM_CACHE
+extern uint32_t phantom_cache_thrashing_threshold;
+extern uint32_t phantom_cache_eval_period_in_msecs;
+extern uint32_t phantom_cache_thrashing_threshold_ssd;
+
+
+SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
 SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
 SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
 #endif
 
+#if CONFIG_BACKGROUND_QUEUE
+
+extern uint32_t        vm_page_background_count;
+extern uint32_t        vm_page_background_target;
+extern uint32_t        vm_page_background_internal_count;
+extern uint32_t        vm_page_background_external_count;
+extern uint32_t        vm_page_background_mode;
+extern uint32_t        vm_page_background_exclude_external;
+extern uint64_t        vm_page_background_promoted_count;
+extern uint64_t vm_pageout_rejected_bq_internal;
+extern uint64_t vm_pageout_rejected_bq_external;
+
+SYSCTL_INT(_vm, OID_AUTO, vm_page_background_mode, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_mode, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_page_background_exclude_external, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_exclude_external, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_page_background_target, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_background_target, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_page_background_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_count, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_page_background_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_internal_count, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_page_background_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_external_count, 0, "");
+
+SYSCTL_QUAD(_vm, OID_AUTO, vm_page_background_promoted_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_background_promoted_count, "");
+SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_considered_bq_internal, "");
+SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_considered_bq_external, "");
+SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_internal, "");
+SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_rejected_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_rejected_bq_external, "");
+
+#endif /* CONFIG_BACKGROUND_QUEUE */
+
+extern void vm_update_darkwake_mode(boolean_t);
+extern boolean_t vm_darkwake_mode;
+
+STATIC int
+sysctl_toggle_darkwake_mode(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+       int new_value, changed;
+       int error = sysctl_io_number(req, vm_darkwake_mode, sizeof(int), &new_value, &changed);
+
+       if (!error && changed) {
+               if (new_value != 0 && new_value != 1) {
+                       printf("Error: Invalid value passed to darkwake sysctl. Acceptable: 0 or 1.\n");
+                       error = EINVAL;
+               } else {
+                       vm_update_darkwake_mode((boolean_t) new_value);
+               }
+       }
+
+       return(error);
+}
+
+SYSCTL_PROC(_vm, OID_AUTO, darkwake_mode,
+           CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW,
+           0, 0, sysctl_toggle_darkwake_mode, "I", "");
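+
+/*
+ * Usage sketch (illustrative): only 0 or 1 is accepted; a successful write
+ * is forwarded to vm_update_darkwake_mode():
+ *
+ *     int on = 1;
+ *     sysctlbyname("vm.darkwake_mode", NULL, NULL, &on, sizeof(on));
+ */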
+
 #if (DEVELOPMENT || DEBUG)
 
 SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard,
-           CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
-           &vm_page_creation_throttled_hard, 0, "");
+               CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+               &vm_page_creation_throttled_hard, 0, "");
 
 SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft,
-           CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
-           &vm_page_creation_throttled_soft, 0, "");
+               CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
+               &vm_page_creation_throttled_soft, 0, "");
+
+extern uint32_t vm_pageout_memorystatus_fb_factor_nr;
+extern uint32_t vm_pageout_memorystatus_fb_factor_dr;
+SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_nr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_nr, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_pageout_memorystatus_fb_factor_dr, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_memorystatus_fb_factor_dr, 0, "");
+
+
+SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_debug.vm_grab_anon_overrides, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_debug.vm_grab_anon_nops, 0, "");
+
+/* log message counters for persistence mode */
+extern uint32_t oslog_p_total_msgcount;
+extern uint32_t oslog_p_metadata_saved_msgcount;
+extern uint32_t oslog_p_metadata_dropped_msgcount;
+extern uint32_t oslog_p_error_count;
+extern uint32_t oslog_p_saved_msgcount;
+extern uint32_t oslog_p_dropped_msgcount;
+extern uint32_t oslog_p_boot_dropped_msgcount;
+
+/* log message counters for streaming mode */
+extern uint32_t oslog_s_total_msgcount;
+extern uint32_t oslog_s_metadata_msgcount;
+extern uint32_t oslog_s_error_count;
+extern uint32_t oslog_s_streamed_msgcount;
+extern uint32_t oslog_s_dropped_msgcount;
+
+SYSCTL_UINT(_debug, OID_AUTO, oslog_p_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_total_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_saved_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_dropped_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_p_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_error_count, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_p_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_saved_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_p_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_dropped_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_p_boot_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_boot_dropped_msgcount, 0, "");
+
+SYSCTL_UINT(_debug, OID_AUTO, oslog_s_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_total_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_s_metadata_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_metadata_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_s_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_error_count, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_s_streamed_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_streamed_msgcount, 0, "");
+SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_dropped_msgcount, 0, "");
+
 
 #endif /* DEVELOPMENT || DEBUG */
 
@@ -2734,6 +3573,10 @@ SYSCTL_INT (_kern, OID_AUTO, stack_size,
 SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
            CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
 
+extern unsigned int kern_feature_overrides;
+SYSCTL_INT (_kern, OID_AUTO, kern_feature_overrides,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &kern_feature_overrides, 0, "Kernel feature override mask");
+
 /*
  * enable back trace for port allocations
  */
@@ -2747,22 +3590,46 @@ SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
  * Scheduler sysctls
  */
 
-/*
- * See osfmk/kern/sched_prim.c for the corresponding definition
- * in osfmk/. If either version changes, update the other.
- */
-#define SCHED_STRING_MAX_LENGTH (48)
-
-extern char sched_string[SCHED_STRING_MAX_LENGTH];
 SYSCTL_STRING(_kern, OID_AUTO, sched,
                          CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
                          sched_string, sizeof(sched_string),
                          "Timeshare scheduler implementation");
 
+#if CONFIG_QUIESCE_COUNTER
+static int
+sysctl_cpu_quiescent_counter_interval SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+
+       int error = sysctl_handle_int(oidp, &cpu_checkin_min_interval_us, 0, req);
+       if (error || !req->newptr)
+               return error;
+
+       cpu_quiescent_counter_set_min_interval_us(cpu_checkin_min_interval_us);
+
+       return 0;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, cpu_checkin_interval,
+            CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+            0, 0,
+            sysctl_cpu_quiescent_counter_interval, "I",
+            "Quiescent CPU checkin interval (microseconds)");
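+
+/*
+ * Usage sketch (illustrative): a write is taken as microseconds and pushed
+ * to cpu_quiescent_counter_set_min_interval_us(); the value below is only
+ * an example:
+ *
+ *     int us = 100;
+ *     sysctlbyname("kern.cpu_checkin_interval", NULL, NULL, &us, sizeof(us));
+ */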
+#endif /* CONFIG_QUIESCE_COUNTER */
+
+
 /*
  * Only support runtime modification on embedded platforms
  * with development config enabled
  */
+#if CONFIG_EMBEDDED
+#if !SECURE_KERNEL
+extern int precise_user_kernel_time;
+SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time, 
+               CTLFLAG_RW | CTLFLAG_LOCKED,
+               &precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time");
+#endif /* !SECURE_KERNEL */
+#endif /* CONFIG_EMBEDDED */
 
 
 /* Parameters related to timer coalescing tuning, to be replaced
@@ -2944,3 +3811,716 @@ SYSCTL_INT(_kern, OID_AUTO, hv_support,
                CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, 
                &hv_support_available, 0, "");
 #endif
+
+#if CONFIG_EMBEDDED
+STATIC int
+sysctl_darkboot SYSCTL_HANDLER_ARGS
+{
+       int err = 0, value = 0;
+#pragma unused(oidp, arg1, arg2, err, value, req)
+
+       /*
+        * Handle the sysctl request.
+        *
+        * If this is a read, the function will set the value to the current darkboot value. Otherwise,
+        * we'll get the request identifier into "value" and then we can honor it.
+        */
+       if ((err = sysctl_io_number(req, darkboot, sizeof(int), &value, NULL)) != 0) {
+               goto exit;
+       }
+
+       /* writing requested, let's process the request */
+       if (req->newptr) {
+               /* writing is protected by an entitlement */
+               if (priv_check_cred(kauth_cred_get(), PRIV_DARKBOOT, 0) != 0) {
+                       err = EPERM;
+                       goto exit;
+               }
+
+               switch (value) {
+               case MEMORY_MAINTENANCE_DARK_BOOT_UNSET:
+                       /*
+                        * If the darkboot sysctl is unset, the NVRAM variable
+                        * must be unset too. If that's not the case, it means
+                        * someone is doing something crazy and not supported.
+                        */
+                       if (darkboot != 0) {
+                               int ret = PERemoveNVRAMProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME);
+                               if (ret) {
+                                       darkboot = 0;
+                               } else {
+                                       err = EINVAL;
+                               }
+                       }
+                       break;
+               case MEMORY_MAINTENANCE_DARK_BOOT_SET:
+                       darkboot = 1;
+                       break;
+               case MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT: {
+                       /*
+                        * Set the NVRAM and update 'darkboot' in case
+                        * of success. Otherwise, do not update
+                        * 'darkboot' and report the failure.
+                        */
+                       if (PEWriteNVRAMBooleanProperty(MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME, TRUE)) {
+                               darkboot = 1;
+                       } else {
+                               err = EINVAL;
+                       }
+
+                       break;
+               }
+               default:
+                       err = EINVAL;
+               }
+       }
+
+exit:
+       return err;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, darkboot,
+           CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY,
+           0, 0, sysctl_darkboot, "I", "");
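+
+/*
+ * Usage sketch (illustrative): reads return the current darkboot flag;
+ * writes require the PRIV_DARKBOOT entitlement and take one of the
+ * MEMORY_MAINTENANCE_DARK_BOOT_* requests handled above:
+ *
+ *     int request = MEMORY_MAINTENANCE_DARK_BOOT_SET;
+ *     sysctlbyname("kern.darkboot", NULL, NULL, &request, sizeof(request));
+ */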
+#endif /* CONFIG_EMBEDDED */
+
+#if DEVELOPMENT || DEBUG
+#include <sys/sysent.h>
+/* This should result in a fatal exception, verifying that "sysent" is
+ * write-protected.
+ */
+static int
+kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+       uint64_t new_value = 0, old_value = 0;
+       int changed = 0, error;
+
+       error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed);
+       if ((error == 0) && changed) {
+               volatile uint32_t *wraddr = (uint32_t *) &sysent[0];
+               *wraddr = 0;
+               printf("sysent[0] write succeeded\n");
+       }
+       return error;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, sysent_const_check,
+    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+    0, 0,
+    kern_sysent_write, "Q", "Attempt sysent[0] write");
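+
+/*
+ * Usage sketch (illustrative): any written value triggers the store above,
+ * so on a correctly write-protected kernel this call should never return:
+ *
+ *     uint64_t one = 1;
+ *     sysctlbyname("kern.sysent_const_check", NULL, NULL, &one, sizeof(one));
+ */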
+
+#endif /* DEVELOPMENT || DEBUG */
+
+#if DEVELOPMENT || DEBUG
+SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 1, "");
+#else
+SYSCTL_COMPAT_INT(_kern, OID_AUTO, development, CTLFLAG_RD | CTLFLAG_MASKED, NULL, 0, "");
+#endif /* DEVELOPMENT || DEBUG */
+
+
+#if DEVELOPMENT || DEBUG
+
+static int
+sysctl_panic_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+       int rval = 0;
+       char str[32] = "entry prelog postlog postcore";
+
+       rval = sysctl_handle_string(oidp, str, sizeof(str), req);
+
+       if (rval == 0 && req->newptr) {
+               if (strncmp("entry", str, strlen("entry")) == 0) {
+                       panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_ENTRY, "test recursive panic at entry");
+               } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
+                       panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_PRELOG, "test recursive panic prior to writing a paniclog");
+               } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
+                       panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTLOG, "test recursive panic subsequent to paniclog");
+               } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
+                       panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTCORE, "test recursive panic subsequent to on-device core");
+               }
+       }
+
+       return rval;
+}
+
+static int
+sysctl_debugger_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+       int rval = 0;
+       char str[32] = "entry prelog postlog postcore";
+
+       rval = sysctl_handle_string(oidp, str, sizeof(str), req);
+
+       if (rval == 0 && req->newptr) {
+               if (strncmp("entry", str, strlen("entry")) == 0) {
+                       DebuggerWithContext(0, NULL, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY);
+               } else if (strncmp("prelog", str, strlen("prelog")) == 0) {
+                       DebuggerWithContext(0, NULL, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG);
+               } else if (strncmp("postlog", str, strlen("postlog")) == 0) {
+                       DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG);
+               } else if (strncmp("postcore", str, strlen("postcore")) == 0) {
+                       DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE);
+               }
+       }
+
+       return rval;
+}
+
+decl_lck_spin_data(, spinlock_panic_test_lock)
+
+__attribute__((noreturn))
+static void
+spinlock_panic_test_acquire_spinlock(void * arg __unused, wait_result_t wres __unused)
+{
+       lck_spin_lock(&spinlock_panic_test_lock);
+       while (1) { ; }
+}
+
+static int
+sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+       if (req->newlen == 0)
+               return EINVAL;
+
+       thread_t panic_spinlock_thread;
+       /* Initialize panic spinlock */
+       lck_grp_t * panic_spinlock_grp;
+       lck_grp_attr_t * panic_spinlock_grp_attr;
+       lck_attr_t * panic_spinlock_attr;
+
+       panic_spinlock_grp_attr = lck_grp_attr_alloc_init();
+       panic_spinlock_grp = lck_grp_alloc_init("panic_spinlock",  panic_spinlock_grp_attr);
+       panic_spinlock_attr = lck_attr_alloc_init();
+
+       lck_spin_init(&spinlock_panic_test_lock, panic_spinlock_grp, panic_spinlock_attr);
+
+
+       /* Create thread to acquire spinlock */
+       if (kernel_thread_start(spinlock_panic_test_acquire_spinlock, NULL, &panic_spinlock_thread) != KERN_SUCCESS) {
+               return EBUSY;
+       }
+
+       /* Try to acquire spinlock -- should panic eventually */
+       lck_spin_lock(&spinlock_panic_test_lock);
+       while (1) { ; }
+}
+
+__attribute__((noreturn))
+static void
+simultaneous_panic_worker(void * arg, wait_result_t wres __unused)
+{
+       atomic_int *start_panic = (atomic_int *)arg;
+
+       while (!atomic_load(start_panic)) { ; }
+       panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number());
+       __builtin_unreachable();
+}
+
+static int
+sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+       if (req->newlen == 0)
+               return EINVAL;
+
+       int i = 0, threads_to_create = 2 * processor_count;
+       atomic_int start_panic = 0;
+       unsigned int threads_created = 0;
+       thread_t new_panic_thread;
+
+       for (i = threads_to_create; i > 0; i--) {
+               if (kernel_thread_start(simultaneous_panic_worker, (void *) &start_panic, &new_panic_thread) == KERN_SUCCESS) {
+                       threads_created++;
+               }
+       }
+
+       /* FAIL if we couldn't create at least processor_count threads */
+       if (threads_created < processor_count) {
+               panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %d (of %d)",
+                               threads_created, threads_to_create);
+       }
+
+       atomic_exchange(&start_panic, 1);
+       while (1) { ; }
+}
+
+SYSCTL_PROC(_debug, OID_AUTO, panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_panic_test, "A", "panic test");
+SYSCTL_PROC(_debug, OID_AUTO, debugger_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_debugger_test, "A", "debugger test");
+SYSCTL_PROC(_debug, OID_AUTO, spinlock_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_spinlock_panic_test, "A", "spinlock panic test");
+SYSCTL_PROC(_debug, OID_AUTO, simultaneous_panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_simultaneous_panic_test, "A", "simultaneous panic test");
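+
+/*
+ * Usage sketch (illustrative): panic_test and debugger_test parse one of the
+ * keywords matched above ("entry", "prelog", "postlog", "postcore"); the
+ * spinlock/simultaneous tests fire on any non-empty write:
+ *
+ *     char phase[] = "entry";
+ *     sysctlbyname("debug.panic_test", NULL, NULL, phase, sizeof(phase));
+ */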
+
+extern int exc_resource_threads_enabled;
+
+SYSCTL_INT(_kern, OID_AUTO, exc_resource_threads_enabled, CTLFLAG_RD | CTLFLAG_LOCKED, &exc_resource_threads_enabled, 0, "exc_resource thread limit enabled");
+
+
+#endif /* DEVELOPMENT || DEBUG */
+
+const uint32_t thread_groups_supported = 0;
+
+STATIC int
+sysctl_thread_groups_supported(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+       int value = thread_groups_supported;
+       return sysctl_io_number(req, value, sizeof(value), NULL, NULL);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, thread_groups_supported, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_KERN,
+    0, 0, &sysctl_thread_groups_supported, "I", "thread groups supported");
+
+static int
+sysctl_grade_cputype SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+       int error = 0;
+       int type_tuple[2] = {};
+       int return_value = 0;
+
+       error = SYSCTL_IN(req, &type_tuple, sizeof(type_tuple));
+
+       if (error) {
+               return error;
+       }
+
+       return_value = grade_binary(type_tuple[0], type_tuple[1]);
+
+       return SYSCTL_OUT(req, &return_value, sizeof(return_value));
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, grade_cputype,
+            CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED | CTLTYPE_OPAQUE,
+            0, 0, &sysctl_grade_cputype, "S",
+            "grade value of cpu_type_t+cpu_sub_type_t");
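+
+/*
+ * Usage sketch (illustrative): the handler reads a {cpu_type, cpu_subtype}
+ * pair and writes back the result of grade_binary(); the x86_64 constants
+ * below are example inputs:
+ *
+ *     int tuple[2] = { CPU_TYPE_X86_64, CPU_SUBTYPE_X86_64_ALL };
+ *     int grade = 0;
+ *     size_t len = sizeof(grade);
+ *     sysctlbyname("kern.grade_cputype", &grade, &len, tuple, sizeof(tuple));
+ */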
+
+
+#if DEVELOPMENT || DEBUG
+
+static atomic_int wedge_thread_should_wake = 0;
+
+static int
+unwedge_thread SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+       int error, val = 0;
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || val == 0) {
+               return error;
+       }
+
+       atomic_store(&wedge_thread_should_wake, 1);
+       return 0;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, unwedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, unwedge_thread, "I", "unwedge the thread wedged by kern.wedge_thread");
+
+extern uintptr_t phys_carveout_pa;
+SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_pa, CTLFLAG_RD | CTLFLAG_LOCKED,
+               &phys_carveout_pa,
+               "base physical address of the phys_carveout_mb boot-arg region");
+extern size_t phys_carveout_size;
+SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_size, CTLFLAG_RD | CTLFLAG_LOCKED,
+               &phys_carveout_size,
+               "size in bytes of the phys_carveout_mb boot-arg region");
+
+static int
+wedge_thread SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+       int error, val = 0;
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || val == 0) {
+               return error;
+       }
+
+       uint64_t interval = 1;
+       nanoseconds_to_absolutetime(1000 * 1000 * 50, &interval); /* 50 ms */
+
+       atomic_store(&wedge_thread_should_wake, 0);
+       while (!atomic_load(&wedge_thread_should_wake)) {
+               tsleep1(NULL, 0, "wedge_thread", mach_absolute_time() + interval, NULL);
+       }
+
+       return 0;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, wedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, wedge_thread, "I", "wedge this thread so it cannot be cleaned up");
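+
+/*
+ * Usage sketch (illustrative): a nonzero write to kern.wedge_thread parks
+ * the calling thread until kern.unwedge_thread is written from elsewhere:
+ *
+ *     int one = 1;
+ *     sysctlbyname("kern.wedge_thread", NULL, NULL, &one, sizeof(one));
+ *
+ *     ...and, from another thread or process:
+ *     sysctlbyname("kern.unwedge_thread", NULL, NULL, &one, sizeof(one));
+ */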
+
+static int
+sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS;
+static int
+sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS;
+int
+tstile_test_prim_lock(boolean_t use_hashtable);
+int
+tstile_test_prim_unlock(boolean_t use_hashtable);
+
+#define SYSCTL_TURNSTILE_TEST_DEFAULT                   1
+#define SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE          2
+
+static int
+sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+       int error, val = 0;
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || val == 0) {
+               return error;
+       }
+       boolean_t use_hashtable = (val == SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE);
+       return tstile_test_prim_lock(use_hashtable);
+}
+
+static int
+sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+       int error, val = 0;
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || val == 0) {
+               return error;
+       }
+       boolean_t use_hashtable = (val == SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE);
+       return tstile_test_prim_unlock(use_hashtable);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_lock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+       0, 0, sysctl_turnstile_test_prim_lock, "I", "turnstiles test lock");
+
+SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_unlock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+       0, 0, sysctl_turnstile_test_prim_unlock, "I", "turnstiles test unlock");
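+
+/*
+ * Usage sketch (illustrative): write SYSCTL_TURNSTILE_TEST_DEFAULT (1) or
+ * SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE (2) to pick the turnstile variant;
+ * lock first, then unlock with the same selector:
+ *
+ *     int variant = 1;        // SYSCTL_TURNSTILE_TEST_DEFAULT
+ *     sysctlbyname("kern.turnstiles_test_lock", NULL, NULL, &variant, sizeof(variant));
+ *     sysctlbyname("kern.turnstiles_test_unlock", NULL, NULL, &variant, sizeof(variant));
+ */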
+
+int
+turnstile_get_boost_stats_sysctl(void *req);
+int
+turnstile_get_unboost_stats_sysctl(void *req);
+static int
+sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS;
+static int
+sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS;
+extern uint64_t thread_block_on_turnstile_count;
+extern uint64_t thread_block_on_regular_waitq_count;
+
+static int
+sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+       return turnstile_get_boost_stats_sysctl(req);
+}
+
+static int
+sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+       return turnstile_get_unboost_stats_sysctl(req);
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, turnstile_boost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
+       0, 0, sysctl_turnstile_boost_stats, "S", "turnstiles boost stats");
+SYSCTL_PROC(_kern, OID_AUTO, turnstile_unboost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT,
+       0, 0, sysctl_turnstile_unboost_stats, "S", "turnstiles unboost stats");
+SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_turnstile,
+       CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+       &thread_block_on_turnstile_count, "thread blocked on turnstile count");
+SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_reg_waitq,
+       CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+       &thread_block_on_regular_waitq_count, "thread blocked on regular waitq count");
+
+static int
+sysctl_lck_mtx_test_lock SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+       int error, val = 0;
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || val == 0) {
+               return error;
+       }
+
+       if (val == 1) {
+               lck_mtx_test_init();
+               lck_mtx_test_lock();
+       }
+
+       return 0;
+}
+
+static int
+sysctl_lck_mtx_test_unlock SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+       int error, val = 0;
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || val == 0) {
+               return error;
+       }
+
+       if (val == 1) {
+               lck_mtx_test_init();
+               lck_mtx_test_unlock();
+       }
+
+       return 0;
+}
+
+static int
+sysctl_erase_all_test_mtx_stats SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+       int error, val = 0;
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || val == 0) {
+               return error;
+       }
+
+       if (val == 1) {
+               lck_mtx_test_init();
+               erase_all_test_mtx_stats();
+       }
+
+       return 0;
+}
+
+static int
+sysctl_get_test_mtx_stats SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+       char* buffer;
+       int size, buffer_size, error;
+
+       buffer_size = 1000;
+       buffer = kalloc(buffer_size);
+       if (!buffer)
+               panic("Unable to allocate memory for %s\n", __func__);
+
+       lck_mtx_test_init();
+
+       size = get_test_mtx_stats_string(buffer, buffer_size);
+
+       error = sysctl_io_string(req, buffer, size, 0, NULL);
+
+       kfree(buffer, buffer_size);
+
+       return error;
+}
+
+static int
+sysctl_test_mtx_uncontended SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+       char* buffer;
+       int buffer_size, offset, error, iter;
+       char input_val[40];
+
+       if (!req->newptr) {
+               return 0;
+       }
+
+       if (!req->oldptr) {
+               return EINVAL;
+       }
+
+       if (req->newlen >= sizeof(input_val)) {
+               return EINVAL;
+       }
+
+       error = SYSCTL_IN(req, input_val, req->newlen);
+       if (error) {
+               return error;
+       }
+       input_val[req->newlen] = '\0';
+
+       /* reject unparseable input; otherwise iter would be read uninitialized */
+       if (sscanf(input_val, "%d", &iter) != 1) {
+               return EINVAL;
+       }
+
+       if (iter <= 0) {
+               printf("%s requested %d iterations, not starting the test\n", __func__, iter);
+               return EINVAL;
+       }
+
+       lck_mtx_test_init();
+
+       buffer_size = 2000;
+       offset = 0;
+       buffer = kalloc(buffer_size);
+       if (!buffer)
+               panic("Unable to allocate memory for %s\n", __func__);
+       memset(buffer, 0, buffer_size);
+
+       printf("%s starting uncontended mutex test with %d iterations\n", __func__, iter);
+
+       offset = snprintf(buffer, buffer_size, "STATS INNER LOOP");
+       offset += lck_mtx_test_mtx_uncontended(iter, &buffer[offset], buffer_size - offset);
+
+       offset += snprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
+       offset += lck_mtx_test_mtx_uncontended_loop_time(iter, &buffer[offset], buffer_size - offset);
+
+       error = SYSCTL_OUT(req, buffer, offset);
+
+       kfree(buffer, buffer_size);
+       return error;
+}
+
+static int
+sysctl_test_mtx_contended SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+       char* buffer;
+       int buffer_size, offset, error, iter;
+       char input_val[40];
+
+       printf("%s called\n", __func__);
+
+       if (!req->newptr) {
+               return 0;
+       }
+
+       if (!req->oldptr) {
+               return EINVAL;
+       }
+
+       if (req->newlen >= sizeof(input_val)) {
+               return EINVAL;
+       }
+
+       error = SYSCTL_IN(req, input_val, req->newlen);
+       if (error) {
+               return error;
+       }
+       input_val[req->newlen] = '\0';
+
+       /* reject unparseable input; otherwise iter would be read uninitialized */
+       if (sscanf(input_val, "%d", &iter) != 1) {
+               return EINVAL;
+       }
+
+       if (iter <= 0) {
+               printf("%s requested %d iterations, not starting the test\n", __func__, iter);
+               return EINVAL;
+       }
+
+       lck_mtx_test_init();
+
+       erase_all_test_mtx_stats();
+
+       buffer_size = 1000;
+       offset = 0;
+       buffer = kalloc(buffer_size);
+       if (!buffer)
+               panic("Unable to allocate memory for %s\n", __func__);
+       memset(buffer, 0, buffer_size);
+
+       printf("%s starting contended mutex test with %d iterations\n", __func__, iter);
+
+       offset = snprintf(buffer, buffer_size, "STATS INNER LOOP");
+       offset += lck_mtx_test_mtx_contended(iter, &buffer[offset], buffer_size - offset);
+
+       printf("%s starting contended mutex loop test with %d iterations\n", __func__, iter);
+
+       offset += snprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP");
+       offset += lck_mtx_test_mtx_contended_loop_time(iter, &buffer[offset], buffer_size - offset);
+
+       error = SYSCTL_OUT(req, buffer, offset);
+
+       kfree(buffer, buffer_size);
+
+       return error;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, lck_mtx_test_lock, CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+       0, 0, sysctl_lck_mtx_test_lock, "I", "lck mtx test lock");
+
+SYSCTL_PROC(_kern, OID_AUTO, lck_mtx_test_unlock, CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+       0, 0, sysctl_lck_mtx_test_unlock, "I", "lck mtx test unlock");
+
+SYSCTL_PROC(_kern, OID_AUTO, erase_all_test_mtx_stats, CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED,
+       0, 0, sysctl_erase_all_test_mtx_stats, "I", "erase test_mtx statistics");
+
+SYSCTL_PROC(_kern, OID_AUTO, get_test_mtx_stats, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED,
+       0, 0, sysctl_get_test_mtx_stats, "A", "get test_mtx statistics");
+
+SYSCTL_PROC(_kern, OID_AUTO, test_mtx_contended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+       0, 0, sysctl_test_mtx_contended, "A", "get statistics for contended mtx test");
+
+SYSCTL_PROC(_kern, OID_AUTO, test_mtx_uncontended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
+       0, 0, sysctl_test_mtx_uncontended, "A", "get statistics for uncontended mtx test");
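+
+/*
+ * Usage sketch (illustrative): the contended/uncontended tests parse the new
+ * value as a decimal iteration count and return the formatted statistics
+ * through the old-value buffer; "1000" below is an example count:
+ *
+ *     char stats[2000];
+ *     size_t len = sizeof(stats);
+ *     char iters[] = "1000";
+ *     sysctlbyname("kern.test_mtx_uncontended", stats, &len, iters, sizeof(iters));
+ */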
+
+#if defined (__x86_64__)
+
+semaphore_t sysctl_test_panic_with_thread_sem;
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Winfinite-recursion" /* rdar://38801963 */
+__attribute__((noreturn))
+static void
+panic_thread_test_child_spin(void * arg, wait_result_t wres)
+{
+       static int panic_thread_recurse_count = 5;
+
+       if (panic_thread_recurse_count > 0) {
+               panic_thread_recurse_count--;
+               panic_thread_test_child_spin(arg, wres);
+       }
+
+       semaphore_signal(sysctl_test_panic_with_thread_sem);
+       while (1) { ; }
+}
+#pragma clang diagnostic pop
+
+static void
+panic_thread_test_child_park(void * arg __unused, wait_result_t wres __unused)
+{
+       int event;
+
+       assert_wait(&event, THREAD_UNINT);
+       semaphore_signal(sysctl_test_panic_with_thread_sem);
+       thread_block(panic_thread_test_child_park);
+}
+
+static int
+sysctl_test_panic_with_thread SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+       int rval = 0;
+       char str[16] = { '\0' };
+       thread_t child_thread = THREAD_NULL;
+
+       rval = sysctl_handle_string(oidp, str, sizeof(str), req);
+       if (rval != 0 || !req->newptr) {
+               return EINVAL;
+       }
+
+       semaphore_create(kernel_task, &sysctl_test_panic_with_thread_sem, SYNC_POLICY_FIFO, 0);
+
+       /* Create thread to spin or park in continuation */
+       if (strncmp("spin", str, strlen("spin")) == 0) {
+               if (kernel_thread_start(panic_thread_test_child_spin, NULL, &child_thread) != KERN_SUCCESS) {
+                       semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
+                       return EBUSY;
+               }
+       } else if (strncmp("continuation", str, strlen("continuation")) == 0) {
+               if (kernel_thread_start(panic_thread_test_child_park, NULL, &child_thread) != KERN_SUCCESS) {
+                       semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
+                       return EBUSY;
+               }
+       } else {
+               semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem);
+               return EINVAL;
+       }
+
+       semaphore_wait(sysctl_test_panic_with_thread_sem);
+
+       panic_with_thread_context(0, NULL, 0, child_thread, "testing panic_with_thread_context for thread %p", child_thread);
+
+       /* Not reached */
+       return EINVAL;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, test_panic_with_thread, CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_WR | CTLTYPE_STRING,
+               0, 0, sysctl_test_panic_with_thread, "A", "test panic flow for backtracing a different thread");
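+
+/*
+ * Usage sketch (illustrative): write "spin" or "continuation" to choose how
+ * the child thread is parked before panic_with_thread_context() targets it:
+ *
+ *     char mode[] = "spin";
+ *     sysctlbyname("kern.test_panic_with_thread", NULL, NULL, mode, sizeof(mode));
+ */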
+#endif /* defined (__x86_64__) */
+#endif /* DEVELOPMENT || DEBUG */