diff --git a/bsd/vm/vm_unix.c b/bsd/vm/vm_unix.c
index 099a70fb612edb91629ecec103ed899c3cf2d03b..d3109c5646d200dd91160196818ea497dd02d7c4 100644
--- a/bsd/vm/vm_unix.c
+++ b/bsd/vm/vm_unix.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -37,9 +37,6 @@
  * is included in support of clause 2.2 (b) of the Apple Public License,
  * Version 2.0.
  */
-
-#include <meta_features.h>
-
 #include <vm/vm_options.h>
 
 #include <kern/task.h>
@@ -48,6 +45,7 @@
 #include <kern/extmod_statistics.h>
 #include <mach/mach_traps.h>
 #include <mach/port.h>
+#include <mach/sdt.h>
 #include <mach/task.h>
 #include <mach/task_access.h>
 #include <mach/task_special_ports.h>
@@ -81,6 +79,9 @@
 #include <sys/kas_info.h>
 #include <sys/socket.h>
 #include <sys/socketvar.h>
+#if NECP
+#include <net/necp.h>
+#endif /* NECP */
 
 #include <security/audit/audit.h>
 #include <security/mac.h>
@@ -91,8 +92,6 @@
 #include <vm/vm_kern.h>
 #include <vm/vm_pageout.h>
 
-#include <machine/spl.h>
-
 #include <mach/shared_region.h>
 #include <vm/vm_shared_region.h>
 
 
 #include <sys/kern_memorystatus.h>
 
+#if CONFIG_MACF
+#include <security/mac_framework.h>
+#endif
+
+#if CONFIG_CSR
+#include <sys/csr.h>
+#endif /* CONFIG_CSR */
 
 int _shared_region_map_and_slide(struct proc*, int, unsigned int, struct shared_file_mapping_np*, uint32_t, user_addr_t, user_addr_t);
 int shared_region_copyin_mappings(struct proc*, user_addr_t, unsigned int, struct shared_file_mapping_np *);
 
+#if VM_MAP_DEBUG_APPLE_PROTECT
+SYSCTL_INT(_vm, OID_AUTO, map_debug_apple_protect, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_map_debug_apple_protect, 0, "");
+#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
+
+#if VM_MAP_DEBUG_FOURK
+SYSCTL_INT(_vm, OID_AUTO, map_debug_fourk, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_map_debug_fourk, 0, "");
+#endif /* VM_MAP_DEBUG_FOURK */
+
+#if DEVELOPMENT || DEBUG
+
+static int
+sysctl_kmem_alloc_contig SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+       vm_offset_t     kaddr;
+       kern_return_t   kr;
+       int     error = 0;
+       int     size = 0;
+
+       error = sysctl_handle_int(oidp, &size, 0, req);
+       if (error || !req->newptr)
+               return (error);
+
+       kr = kmem_alloc_contig(kernel_map, &kaddr, (vm_size_t)size, 0, 0, 0, 0, VM_KERN_MEMORY_IOKIT);
+
+       if (kr == KERN_SUCCESS)
+               kmem_free(kernel_map, kaddr, size);
+
+       return error;
+}
+
+SYSCTL_PROC(_vm, OID_AUTO, kmem_alloc_contig, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED,
+           0, 0, &sysctl_kmem_alloc_contig, "I", "");
+
+extern int vm_region_footprint;
+SYSCTL_INT(_vm, OID_AUTO, region_footprint, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, &vm_region_footprint, 0, "");
+static int
+sysctl_vm_self_region_footprint SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+       int     error = 0;
+       int     value;
+
+       value = task_self_region_footprint();
+       error = SYSCTL_OUT(req, &value, sizeof (int));
+       if (error) {
+               return error;
+       }
+
+       if (!req->newptr) {
+               return 0;
+       }
+
+       error = SYSCTL_IN(req, &value, sizeof (int));
+       if (error) {
+               return (error);
+       }
+       task_self_region_footprint_set(value);
+       return 0;
+}
+SYSCTL_PROC(_vm, OID_AUTO, self_region_footprint, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_LOCKED|CTLFLAG_MASKED, 0, 0, &sysctl_vm_self_region_footprint, "I", "");
+
+#endif /* DEVELOPMENT || DEBUG */
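The two DEVELOPMENT/DEBUG handlers above follow the usual SYSCTL_PROC pattern: a write to vm.kmem_alloc_contig triggers a contiguous kernel allocation that is immediately freed again, and vm.self_region_footprint reads or sets the per-task footprint reporting mode. A minimal user-space sketch (not part of this change, and assuming a DEVELOPMENT or DEBUG kernel where these nodes exist) could exercise them via sysctlbyname(3):

#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int footprint = 0;
	size_t len = sizeof(footprint);

	/* read the current per-task footprint reporting mode */
	if (sysctlbyname("vm.self_region_footprint", &footprint, &len, NULL, 0) == 0)
		printf("vm.self_region_footprint = %d\n", footprint);

	/* ask the kernel to try (and immediately free) a 1 MB contiguous allocation */
	int size = 1024 * 1024;
	if (sysctlbyname("vm.kmem_alloc_contig", NULL, NULL, &size, sizeof(size)) != 0)
		perror("vm.kmem_alloc_contig");	/* write-only node; may require privilege */

	return 0;
}
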
+
+
+#if CONFIG_EMBEDDED
+
+#if DEVELOPMENT || DEBUG
+extern int panic_on_unsigned_execute;
+SYSCTL_INT(_vm, OID_AUTO, panic_on_unsigned_execute, CTLFLAG_RW | CTLFLAG_LOCKED, &panic_on_unsigned_execute, 0, "");
+#endif /* DEVELOPMENT || DEBUG */
+
+extern int log_executable_mem_entry;
+extern int cs_executable_create_upl;
+extern int cs_executable_mem_entry;
+extern int cs_executable_wire;
+SYSCTL_INT(_vm, OID_AUTO, log_executable_mem_entry, CTLFLAG_RD | CTLFLAG_LOCKED, &log_executable_mem_entry, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, cs_executable_create_upl, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_create_upl, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, cs_executable_mem_entry, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_mem_entry, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, cs_executable_wire, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_wire, 0, "");
+#endif /* CONFIG_EMBEDDED */
+
 #if DEVELOPMENT || DEBUG
 extern int radar_20146450;
 SYSCTL_INT(_vm, OID_AUTO, radar_20146450, CTLFLAG_RW | CTLFLAG_LOCKED, &radar_20146450, 0, "");
@@ -114,6 +202,38 @@ SYSCTL_INT(_vm, OID_AUTO, macho_printf, CTLFLAG_RW | CTLFLAG_LOCKED, &macho_prin
 extern int apple_protect_pager_data_request_debug;
 SYSCTL_INT(_vm, OID_AUTO, apple_protect_pager_data_request_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &apple_protect_pager_data_request_debug, 0, "");
 
+#if __arm__ || __arm64__
+/* These are meant to support the page table accounting unit test. */
+extern unsigned int arm_hardware_page_size;
+extern unsigned int arm_pt_desc_size;
+extern unsigned int arm_pt_root_size;
+extern unsigned int free_page_size_tt_count;
+extern unsigned int free_two_page_size_tt_count;
+extern unsigned int free_tt_count;
+extern unsigned int inuse_user_tteroot_count;
+extern unsigned int inuse_kernel_tteroot_count;
+extern unsigned int inuse_user_ttepages_count;
+extern unsigned int inuse_kernel_ttepages_count;
+extern unsigned int inuse_user_ptepages_count;
+extern unsigned int inuse_kernel_ptepages_count;
+SYSCTL_UINT(_vm, OID_AUTO, native_hw_pagesize, CTLFLAG_RD | CTLFLAG_LOCKED, &arm_hardware_page_size, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, arm_pt_desc_size, CTLFLAG_RD | CTLFLAG_LOCKED, &arm_pt_desc_size, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, arm_pt_root_size, CTLFLAG_RD | CTLFLAG_LOCKED, &arm_pt_root_size, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, free_1page_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &free_page_size_tt_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, free_2page_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &free_two_page_size_tt_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, free_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &free_tt_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, user_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_user_tteroot_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, kernel_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_kernel_tteroot_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, user_tte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_user_ttepages_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, kernel_tte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_kernel_ttepages_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, user_pte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_user_ptepages_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, kernel_pte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_kernel_ptepages_count, 0, "");
+#endif /* __arm__ || __arm64__ */
+
+#if __arm64__
+extern int fourk_pager_data_request_debug;
+SYSCTL_INT(_vm, OID_AUTO, fourk_pager_data_request_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &fourk_pager_data_request_debug, 0, "");
+#endif /* __arm64__ */
 #endif /* DEVELOPMENT || DEBUG */
 
 SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_compressor, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_compressor, 0, "");
@@ -154,13 +274,13 @@ __attribute__((noinline)) int __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL_
  * Sysctl's related to data/stack execution.  See osfmk/vm/vm_map.c
  */
 
-#ifndef SECURE_KERNEL
+#if DEVELOPMENT || DEBUG
 extern int allow_stack_exec, allow_data_exec;
 
 SYSCTL_INT(_vm, OID_AUTO, allow_stack_exec, CTLFLAG_RW | CTLFLAG_LOCKED, &allow_stack_exec, 0, "");
 SYSCTL_INT(_vm, OID_AUTO, allow_data_exec, CTLFLAG_RW | CTLFLAG_LOCKED, &allow_data_exec, 0, "");
 
-#endif /* !SECURE_KERNEL */
+#endif /* DEVELOPMENT || DEBUG */
 
 static const char *prot_values[] = {
        "none",
@@ -180,6 +300,13 @@ log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot)
                current_proc()->p_comm, current_proc()->p_pid, vaddr, prot_values[prot & VM_PROT_ALL]);
 }
 
+/*
+ * shared_region_unnest_logging: level of logging of unnesting events
+ * 0   - no logging
+ * 1   - throttled logging of unexpected unnesting events (default)
+ * 2   - unthrottled logging of unexpected unnesting events
+ * 3+  - unthrottled logging of all unnesting events
+ */
 int shared_region_unnest_logging = 1;
 
 SYSCTL_INT(_vm, OID_AUTO, shared_region_unnest_logging, CTLFLAG_RW | CTLFLAG_LOCKED,
@@ -192,11 +319,27 @@ int shared_region_unnest_log_count_threshold = 5;
  * Shared cache path enforcement.
  */
 
+#ifndef CONFIG_EMBEDDED
 static int scdir_enforce = 1;
 static char scdir_path[] = "/var/db/dyld/";
+#else
+static int scdir_enforce = 0;
+static char scdir_path[] = "/System/Library/Caches/com.apple.dyld/";
+#endif
 
 #ifndef SECURE_KERNEL
-SYSCTL_INT(_vm, OID_AUTO, enforce_shared_cache_dir, CTLFLAG_RW | CTLFLAG_LOCKED, &scdir_enforce, 0, "");
+static int sysctl_scdir_enforce SYSCTL_HANDLER_ARGS
+{
+#if CONFIG_CSR
+       if (csr_check(CSR_ALLOW_UNRESTRICTED_FS) != 0) {
+               printf("Failed attempt to set vm.enforce_shared_cache_dir sysctl\n");
+               return EPERM;
+       }
+#endif /* CONFIG_CSR */
+       return sysctl_handle_int(oidp, arg1, arg2, req);
+}
+
+SYSCTL_PROC(_vm, OID_AUTO, enforce_shared_cache_dir, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &scdir_enforce, 0, sysctl_scdir_enforce, "I", "");
 #endif
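With the new handler, a write to vm.enforce_shared_cache_dir is gated by csr_check(CSR_ALLOW_UNRESTRICTED_FS), so on a system with SIP's filesystem restrictions in effect the set fails with EPERM while reads continue to work. A small illustrative sketch (hypothetical, not part of this change):

#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int off = 0;

	/* expected to fail with EPERM unless SIP's filesystem restrictions are disabled */
	if (sysctlbyname("vm.enforce_shared_cache_dir", NULL, NULL, &off, sizeof(off)) != 0)
		perror("vm.enforce_shared_cache_dir");
	return 0;
}
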
 
 /* These log rate throttling state variables aren't thread safe, but
@@ -205,27 +348,46 @@ SYSCTL_INT(_vm, OID_AUTO, enforce_shared_cache_dir, CTLFLAG_RW | CTLFLAG_LOCKED,
 static int64_t last_unnest_log_time = 0; 
 static int shared_region_unnest_log_count = 0;
 
-void log_unnest_badness(
+void
+log_unnest_badness(
        vm_map_t        m,
        vm_map_offset_t s,
-       vm_map_offset_t e) {
+       vm_map_offset_t e,
+       boolean_t       is_nested_map,
+       vm_map_offset_t lowest_unnestable_addr)
+{
        struct timeval  tv;
 
        if (shared_region_unnest_logging == 0)
                return;
 
-       if (shared_region_unnest_logging == 1) {
+       if (shared_region_unnest_logging <= 2 &&
+           is_nested_map &&
+           s >= lowest_unnestable_addr) {
+               /*
+                * Unnesting of writable map entries is fine.
+                */
+               return;
+       }
+
+       if (shared_region_unnest_logging <= 1) {
                microtime(&tv);
-               if ((tv.tv_sec - last_unnest_log_time) < vm_shared_region_unnest_log_interval) {
-                       if (shared_region_unnest_log_count++ > shared_region_unnest_log_count_threshold)
+               if ((tv.tv_sec - last_unnest_log_time) <
+                   vm_shared_region_unnest_log_interval) {
+                       if (shared_region_unnest_log_count++ >
+                           shared_region_unnest_log_count_threshold)
                                return;
-               }
-               else {
+               } else {
                        last_unnest_log_time = tv.tv_sec;
                        shared_region_unnest_log_count = 0;
                }
        }
 
+       DTRACE_VM4(log_unnest_badness,
+                  vm_map_t, m,
+                  vm_map_offset_t, s,
+                  vm_map_offset_t, e,
+                  vm_map_offset_t, lowest_unnestable_addr);
        printf("%s[%d] triggered unnest of range 0x%qx->0x%qx of DYLD shared region in VM map %p. While not abnormal for debuggers, this increases system memory footprint until the target exits.\n", current_proc()->p_comm, current_proc()->p_pid, (uint64_t)s, (uint64_t)e, (void *) VM_KERNEL_ADDRPERM(m));
 }
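The logging levels documented above (0 off, 1 throttled, 2 unthrottled unexpected events, 3+ everything) are selected through the vm.shared_region_unnest_logging sysctl. A hypothetical sketch, assuming sufficient privilege to set a CTLFLAG_RW node:

#include <sys/sysctl.h>

int
main(void)
{
	int level = 2;	/* unthrottled logging of unexpected unnesting events */
	return sysctlbyname("vm.shared_region_unnest_logging", NULL, NULL,
	    &level, sizeof(level));
}
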
 
@@ -256,12 +418,12 @@ vslock(
        vm_map_t        map;
 
        map = current_map();
-       kret = vm_map_wire(map,
+       kret = vm_map_wire_kernel(map,
                           vm_map_trunc_page(addr,
                                             vm_map_page_mask(map)),
                           vm_map_round_page(addr+len,
                                             vm_map_page_mask(map)), 
-                          VM_PROT_READ | VM_PROT_WRITE | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_BSD),
+                          VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_BSD,
                           FALSE);
 
        switch (kret) {
@@ -495,7 +657,7 @@ pid_for_task(
        AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
        AUDIT_ARG(mach_port1, t);
 
-       t1 = port_name_to_task(t);
+       t1 = port_name_to_task_inspect(t);
 
        if (t1 == TASK_NULL) {
                err = KERN_FAILURE;
@@ -505,7 +667,10 @@ pid_for_task(
                if (p) {
                        pid  = proc_pid(p);
                        err = KERN_SUCCESS;
-               } else {
+               } else if (is_corpsetask(t1)) {
+                       pid = task_pid(t1);
+                       err = KERN_SUCCESS;
+               } else {
                        err = KERN_FAILURE;
                }
        }
@@ -637,8 +802,9 @@ task_for_pid(
        user_addr_t             task_addr = args->t;
        proc_t                  p = PROC_NULL;
        task_t                  t1 = TASK_NULL;
+       task_t                  task = TASK_NULL;
        mach_port_name_t        tret = MACH_PORT_NULL;
-       ipc_port_t              tfpport;
+       ipc_port_t              tfpport = MACH_PORT_NULL;
        void * sright;
        int error = 0;
 
@@ -676,52 +842,86 @@ task_for_pid(
                goto tfpout;
        }
 
-       if (p->task != TASK_NULL) {
-               /* If we aren't root and target's task access port is set... */
-               if (!kauth_cred_issuser(kauth_cred_get()) &&
-                       p != current_proc() &&
-                       (task_get_task_access_port(p->task, &tfpport) == 0) &&
-                       (tfpport != IPC_PORT_NULL)) {
+       if (p->task == TASK_NULL) {
+               error = KERN_SUCCESS;
+               goto tfpout;
+       }
 
-                       if (tfpport == IPC_PORT_DEAD) {
-                               error = KERN_PROTECTION_FAILURE;
-                               goto tfpout;
-                       }
+#if CONFIG_MACF
+       error = mac_proc_check_get_task(kauth_cred_get(), p);
+       if (error) {
+               error = KERN_FAILURE;
+               goto tfpout;
+       }
+#endif
 
-                       /* Call up to the task access server */
-                       error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
+       /* Grab a task reference since the proc ref might be dropped if an upcall to task access server is made */
+       task = p->task;
+       task_reference(task);
 
-                       if (error != MACH_MSG_SUCCESS) {
-                               if (error == MACH_RCV_INTERRUPTED)
-                                       error = KERN_ABORTED;
-                               else
-                                       error = KERN_FAILURE;
-                               goto tfpout;
-                       }
+       /* If we aren't root and target's task access port is set... */
+       if (!kauth_cred_issuser(kauth_cred_get()) &&
+               p != current_proc() &&
+               (task_get_task_access_port(task, &tfpport) == 0) &&
+               (tfpport != IPC_PORT_NULL)) {
+
+               if (tfpport == IPC_PORT_DEAD) {
+                       error = KERN_PROTECTION_FAILURE;
+                       goto tfpout;
                }
-#if CONFIG_MACF
-               error = mac_proc_check_get_task(kauth_cred_get(), p);
-               if (error) {
-                       error = KERN_FAILURE;
+
+               /*
+                * Drop the proc_find proc ref before making an upcall
+                * to taskgated, since holding a proc_find
+                * ref while making an upcall can cause deadlock.
+                */
+               proc_rele(p);
+               p = PROC_NULL;
+
+               /* Call up to the task access server */
+               error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
+
+               if (error != MACH_MSG_SUCCESS) {
+                       if (error == MACH_RCV_INTERRUPTED)
+                               error = KERN_ABORTED;
+                       else
+                               error = KERN_FAILURE;
                        goto tfpout;
                }
-#endif
+       }
 
-               /* Grant task port access */
-               task_reference(p->task);
-               extmod_statistics_incr_task_for_pid(p->task);
+       /* Grant task port access */
+       extmod_statistics_incr_task_for_pid(task);
+       sright = (void *) convert_task_to_port(task);
+
+       /* Check if the task has been corpsified */
+       if (is_corpsetask(task)) {
+               /* task ref consumed by convert_task_to_port */
+               task = TASK_NULL;
+               ipc_port_release_send(sright);
+               error = KERN_FAILURE;
+               goto tfpout;
+       }
+
+       /* task ref consumed by convert_task_to_port */
+       task = TASK_NULL;
+       tret = ipc_port_copyout_send(
+                       sright,
+                       get_task_ipcspace(current_task()));
 
-               sright = (void *) convert_task_to_port(p->task);
-               tret = ipc_port_copyout_send(
-                               sright, 
-                               get_task_ipcspace(current_task()));
-       } 
        error = KERN_SUCCESS;
 
 tfpout:
        task_deallocate(t1);
        AUDIT_ARG(mach_port2, tret);
        (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
+
+       if (tfpport != IPC_PORT_NULL) {
+               ipc_port_release_send(tfpport);
+       }
+       if (task != TASK_NULL) {
+               task_deallocate(task);
+       }
        if (p != PROC_NULL)
                proc_rele(p);
        AUDIT_MACH_SYSCALL_EXIT(error);
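The reworked path above still serves the familiar task_for_pid()/pid_for_task() round trip; the differences are that the proc_find() reference is dropped before the taskgated upcall and that corpse tasks are rejected. A hypothetical user-space sketch (the trap prototypes are declared by hand here, as is customary, and the call still requires the usual entitlements or root plus taskgated approval):

#include <mach/mach.h>
#include <stdio.h>
#include <unistd.h>

extern kern_return_t task_for_pid(mach_port_name_t target, int pid, mach_port_name_t *task);
extern kern_return_t pid_for_task(mach_port_name_t task, int *pid);

int
main(void)
{
	mach_port_name_t task = MACH_PORT_NULL;
	int pid = -1;

	/* obtain our own task port, then map it back to a pid */
	if (task_for_pid(mach_task_self(), getpid(), &task) == KERN_SUCCESS &&
	    pid_for_task(task, &pid) == KERN_SUCCESS)
		printf("round-tripped pid %d\n", pid);
	return 0;
}
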
@@ -849,6 +1049,7 @@ pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret)
        }
 
        target = targetproc->task;
+#ifndef CONFIG_EMBEDDED
        if (target != TASK_NULL) {
                mach_port_t tfpport;
 
@@ -875,6 +1076,7 @@ pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret)
                        }
                }
        }
+#endif
 
        task_reference(target);
        error = task_pidsuspend(target);
@@ -933,6 +1135,7 @@ pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret)
        }
 
        target = targetproc->task;
+#ifndef CONFIG_EMBEDDED
        if (target != TASK_NULL) {
                mach_port_t tfpport;
 
@@ -959,7 +1162,13 @@ pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret)
                        }
                }
        }
+#endif
 
+#if CONFIG_EMBEDDED
+#if SOCKETS
+       resume_proc_sockets(targetproc);
+#endif /* SOCKETS */
+#endif /* CONFIG_EMBEDDED */
 
        task_reference(target);
 
@@ -990,6 +1199,202 @@ out:
        return error;
 }
 
+#if CONFIG_EMBEDDED
+/*
+ * Freeze the specified process (provided in args->pid), or find and freeze a PID.
+ * When a process is specified, this call is blocking, otherwise we wake up the
+ * freezer thread and do not block on a process being frozen.
+ */
+kern_return_t
+pid_hibernate(struct proc *p __unused, struct pid_hibernate_args *args, int *ret)
+{
+       int     error = 0;
+       proc_t  targetproc = PROC_NULL;
+       int     pid = args->pid;
+
+#ifndef CONFIG_FREEZE
+       #pragma unused(pid)
+#else
+
+#if CONFIG_MACF
+       error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_HIBERNATE);
+       if (error) {
+               error = EPERM;
+               goto out;
+       }
+#endif
+
+       /*
+        * If a pid has been provided, we obtain the process handle and call task_for_pid_posix_check().
+        */
+
+       if (pid >= 0) {
+               targetproc = proc_find(pid);
+
+               if (targetproc == PROC_NULL) {
+                       error = ESRCH;
+                       goto out;
+               }
+
+               if (!task_for_pid_posix_check(targetproc)) {
+                       error = EPERM;
+                       goto out;
+               }
+       }
+
+       if (pid == -2) {
+               vm_pageout_anonymous_pages();
+       } else if (pid == -1) {
+               memorystatus_on_inactivity(targetproc);
+       } else {
+               error = memorystatus_freeze_process_sync(targetproc);
+       }
+
+out:
+
+#endif /* CONFIG_FREEZE */
+
+       if (targetproc != PROC_NULL)
+               proc_rele(targetproc);
+       *ret = error;
+       return error;
+}
+#endif /* CONFIG_EMBEDDED */
+
+#if SOCKETS
+int
+networking_memstatus_callout(proc_t p, uint32_t status)
+{
+       struct filedesc *fdp;
+       int i;
+
+       /*
+        * proc list lock NOT held
+        * proc lock NOT held
+        * a reference on the proc has been held / shall be dropped by the caller.
+        */
+       LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
+       LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_NOTOWNED);
+
+       proc_fdlock(p);
+       fdp = p->p_fd;
+       for (i = 0; i < fdp->fd_nfiles; i++) {
+               struct fileproc *fp;
+
+               fp = fdp->fd_ofiles[i];
+               if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
+                       continue;
+               }
+               switch (FILEGLOB_DTYPE(fp->f_fglob)) {
+#if NECP
+               case DTYPE_NETPOLICY:
+                       necp_fd_memstatus(p, status,
+                           (struct necp_fd_data *)fp->f_fglob->fg_data);
+                       break;
+#endif /* NECP */
+               default:
+                       break;
+               }
+       }
+       proc_fdunlock(p);
+
+       return (1);
+}
+
+
+static int
+networking_defunct_callout(proc_t p, void *arg)
+{
+       struct pid_shutdown_sockets_args *args = arg;
+       int pid = args->pid;
+       int level = args->level;
+       struct filedesc *fdp;
+       int i;
+
+       proc_fdlock(p);
+       fdp = p->p_fd;
+       for (i = 0; i < fdp->fd_nfiles; i++) {
+               struct fileproc *fp = fdp->fd_ofiles[i];
+               struct fileglob *fg;
+
+               if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
+                       continue;
+               }
+
+               fg = fp->f_fglob;
+               switch (FILEGLOB_DTYPE(fg)) {
+               case DTYPE_SOCKET: {
+                       struct socket *so = (struct socket *)fg->fg_data;
+                       if (p->p_pid == pid || so->last_pid == pid || 
+                           ((so->so_flags & SOF_DELEGATED) && so->e_pid == pid)) {
+                               /* Call networking stack with socket and level */
+                               (void) socket_defunct(p, so, level);
+                       }
+                       break;
+               }
+#if NECP
+               case DTYPE_NETPOLICY:
+                       /* first pass: defunct necp and get stats for ntstat */
+                       if (p->p_pid == pid) {
+                               necp_fd_defunct(p,
+                                   (struct necp_fd_data *)fg->fg_data);
+                       }
+                       break;
+#endif /* NECP */
+               default:
+                       break;
+               }
+       }
+
+       proc_fdunlock(p);
+
+       return (PROC_RETURNED);
+}
+
+int
+pid_shutdown_sockets(struct proc *p __unused, struct pid_shutdown_sockets_args *args, int *ret)
+{
+       int                             error = 0;
+       proc_t                          targetproc = PROC_NULL;
+       int                             pid = args->pid;
+       int                             level = args->level;
+
+       if (level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC &&
+           level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL) {
+               error = EINVAL;
+               goto out;
+       }
+
+#if CONFIG_MACF
+       error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_SHUTDOWN_SOCKETS);
+       if (error) {
+               error = EPERM;
+               goto out;
+       }
+#endif
+
+       targetproc = proc_find(pid);
+       if (targetproc == PROC_NULL) {
+               error = ESRCH;
+               goto out;
+       }
+
+       if (!task_for_pid_posix_check(targetproc)) {
+               error = EPERM;
+               goto out;
+       }
+
+       proc_iterate(PROC_ALLPROCLIST | PROC_NOWAITTRANS,
+           networking_defunct_callout, args, NULL, NULL);
+
+out:
+       if (targetproc != PROC_NULL)
+               proc_rele(targetproc);
+       *ret = error;
+       return error;
+}
+
+#endif /* SOCKETS */
 
 static int
 sysctl_settfp_policy(__unused struct sysctl_oid *oidp, void *arg1,
@@ -1177,6 +1582,7 @@ _shared_region_map_and_slide(
 #endif
        memory_object_control_t         file_control;
        struct vm_shared_region         *shared_region;
+       uint32_t                        i;
 
        SHARED_REGION_TRACE_DEBUG(
                ("shared_region: %p [%d(%s)] -> map\n",
@@ -1257,16 +1663,6 @@ _shared_region_map_and_slide(
        }
 #endif /* MAC */
 
-#if CONFIG_PROTECT
-       /* check for content protection access */
-       {
-               error = cp_handle_vnop(vp, CP_READ_ACCESS | CP_WRITE_ACCESS, 0);
-               if (error) { 
-                       goto done;
-               }
-       }
-#endif /* CONFIG_PROTECT */
-
        /* make sure vnode is on the process's root volume */
        root_vp = p->p_fd->fd_rdir;
        if (root_vp == NULL) {
@@ -1371,9 +1767,39 @@ _shared_region_map_and_slide(
                goto done;
        }
 
+       /* check that the mappings are properly covered by code signatures */
+       if (!cs_system_enforcement()) {
+               /* code signing is not enforced: no need to check */
+       } else for (i = 0; i < mappings_count; i++) {
+               if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
+                       /* zero-filled mapping: not backed by the file */
+                       continue;
+               }
+               if (ubc_cs_is_range_codesigned(vp,
+                                              mappings[i].sfm_file_offset,
+                                              mappings[i].sfm_size)) {
+                       /* this mapping is fully covered by code signatures */
+                       continue;
+               }
+               SHARED_REGION_TRACE_ERROR(
+                       ("shared_region: %p [%d(%s)] map(%p:'%s'): "
+                        "mapping #%d/%d [0x%llx:0x%llx:0x%llx:0x%x:0x%x] "
+                        "is not code-signed\n",
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm,
+                        (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
+                        i, mappings_count,
+                        mappings[i].sfm_address,
+                        mappings[i].sfm_size,
+                        mappings[i].sfm_file_offset,
+                        mappings[i].sfm_max_prot,
+                        mappings[i].sfm_init_prot));
+               error = EINVAL;
+               goto done;
+       }
 
        /* get the process's shared region (setup in vm_map_exec()) */
-       shared_region = vm_shared_region_get(current_task());
+       shared_region = vm_shared_region_trim_and_get(current_task());
        if (shared_region == NULL) {
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map(%p:'%s'): "
@@ -1381,6 +1807,7 @@ _shared_region_map_and_slide(
                         (void *)VM_KERNEL_ADDRPERM(current_thread()),
                         p->p_pid, p->p_comm,
                         (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name));
+               error = EINVAL;
                goto done;
        }
 
@@ -1553,9 +1980,8 @@ extern unsigned int       vm_page_free_target;
 SYSCTL_INT(_vm, OID_AUTO, vm_page_free_target, CTLFLAG_RD | CTLFLAG_LOCKED, 
                   &vm_page_free_target, 0, "Pageout daemon free target");
 
-extern unsigned int    vm_memory_pressure;
 SYSCTL_INT(_vm, OID_AUTO, memory_pressure, CTLFLAG_RD | CTLFLAG_LOCKED,
-          &vm_memory_pressure, 0, "Memory pressure indicator");
+          &vm_pageout_state.vm_memory_pressure, 0, "Memory pressure indicator");
 
 static int
 vm_ctl_page_free_wanted SYSCTL_HANDLER_ARGS
@@ -1578,9 +2004,42 @@ extern unsigned int      vm_page_purgeable_wired_count;
 SYSCTL_INT(_vm, OID_AUTO, page_purgeable_wired_count, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_purgeable_wired_count, 0, "Wired purgeable page count");
 
-extern unsigned int    vm_pageout_purged_objects;
+#if DEVELOPMENT || DEBUG
+extern uint64_t get_pages_grabbed_count(void);
+
+static int
+pages_grabbed SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+       uint64_t value = get_pages_grabbed_count();
+       return SYSCTL_OUT(req, &value, sizeof(value));
+}
+
+SYSCTL_PROC(_vm, OID_AUTO, pages_grabbed, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+           0, 0, &pages_grabbed, "QU", "Total pages grabbed");
+SYSCTL_ULONG(_vm, OID_AUTO, pages_freed, CTLFLAG_RD | CTLFLAG_LOCKED,
+            &vm_pageout_vminfo.vm_page_pages_freed, "Total pages freed");
+
 SYSCTL_INT(_vm, OID_AUTO, pageout_purged_objects, CTLFLAG_RD | CTLFLAG_LOCKED,
-          &vm_pageout_purged_objects, 0, "System purged object count");
+          &vm_pageout_debug.vm_pageout_purged_objects, 0, "System purged object count");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_busy, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_busy, 0, "Cleaned pages busy (deactivated)");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_nolock, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_nolock, 0, "Cleaned pages no-lock (deactivated)");
+
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_volatile_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_volatile_reactivated, 0, "Cleaned pages volatile reactivated");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_fault_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_fault_reactivated, 0, "Cleaned pages fault reactivated");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_reactivated, 0, "Cleaned pages reactivated"); /* sum of all reactivated AND busy and nolock (even though those actually get reDEactivated) */
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_vminfo.vm_pageout_freed_cleaned, "Cleaned pages freed");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reference_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_reference_reactivated, 0, "Cleaned pages reference reactivated");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_enqueued_cleaned, 0, ""); /* sum of next two */
+#endif
 
 extern int madvise_free_debug;
 SYSCTL_INT(_vm, OID_AUTO, madvise_free_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
@@ -1592,7 +2051,7 @@ SYSCTL_QUAD(_vm, OID_AUTO, reusable_success, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_stats_reusable.reusable_pages_success, "");
 SYSCTL_QUAD(_vm, OID_AUTO, reusable_failure, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_stats_reusable.reusable_pages_failure, "");
-SYSCTL_QUAD(_vm, OID_AUTO, reusable_shared, CTLFLAG_RD | CTLFLAG_LOCKED,
+SYSCTL_QUAD(_vm, OID_AUTO, reusable_pages_shared, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_stats_reusable.reusable_pages_shared, "");
 SYSCTL_QUAD(_vm, OID_AUTO, all_reusable_calls, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_stats_reusable.all_reusable_calls, "");
@@ -1612,6 +2071,12 @@ SYSCTL_QUAD(_vm, OID_AUTO, can_reuse_failure, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_stats_reusable.can_reuse_failure, "");
 SYSCTL_QUAD(_vm, OID_AUTO, reusable_reclaimed, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_stats_reusable.reusable_reclaimed, "");
+SYSCTL_QUAD(_vm, OID_AUTO, reusable_nonwritable, CTLFLAG_RD | CTLFLAG_LOCKED,
+          &vm_page_stats_reusable.reusable_nonwritable, "");
+SYSCTL_QUAD(_vm, OID_AUTO, reusable_shared, CTLFLAG_RD | CTLFLAG_LOCKED,
+          &vm_page_stats_reusable.reusable_shared, "");
+SYSCTL_QUAD(_vm, OID_AUTO, free_shared, CTLFLAG_RD | CTLFLAG_LOCKED,
+          &vm_page_stats_reusable.free_shared, "");
 
 
 extern unsigned int vm_page_free_count, vm_page_speculative_count;
@@ -1621,42 +2086,80 @@ SYSCTL_UINT(_vm, OID_AUTO, page_speculative_count, CTLFLAG_RD | CTLFLAG_LOCKED,
 extern unsigned int vm_page_cleaned_count;
 SYSCTL_UINT(_vm, OID_AUTO, page_cleaned_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_cleaned_count, 0, "Cleaned queue size");
 
+extern unsigned int vm_page_pageable_internal_count, vm_page_pageable_external_count;
+SYSCTL_UINT(_vm, OID_AUTO, page_pageable_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pageable_internal_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_pageable_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pageable_external_count, 0, "");
+
 /* pageout counts */
-extern unsigned int vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external, vm_pageout_inactive_clean, vm_pageout_speculative_clean, vm_pageout_inactive_used;
-extern unsigned int vm_pageout_freed_from_inactive_clean, vm_pageout_freed_from_speculative;
-SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_dirty_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_dirty_internal, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_dirty_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_dirty_external, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_clean, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_speculative_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_speculative_clean, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_used, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_used, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_inactive_clean, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_speculative, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_speculative, 0, "");
-
-extern unsigned int vm_pageout_freed_from_cleaned;
-SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_cleaned, 0, "");
-
-/* counts of pages entering the cleaned queue */
-extern unsigned int vm_pageout_enqueued_cleaned, vm_pageout_enqueued_cleaned_from_inactive_clean, vm_pageout_enqueued_cleaned_from_inactive_dirty;
-SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned, 0, ""); /* sum of next two */
-SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned_from_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned_from_inactive_clean, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned_from_inactive_dirty, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned_from_inactive_dirty, 0, "");
-
-/* counts of pages leaving the cleaned queue */
-extern unsigned int vm_pageout_cleaned_reclaimed, vm_pageout_cleaned_reactivated, vm_pageout_cleaned_reference_reactivated, vm_pageout_cleaned_volatile_reactivated, vm_pageout_cleaned_fault_reactivated, vm_pageout_cleaned_commit_reactivated, vm_pageout_cleaned_busy, vm_pageout_cleaned_nolock;
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_reclaimed, 0, "Cleaned pages reclaimed");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_reactivated, 0, "Cleaned pages reactivated"); /* sum of all reactivated AND busy and nolock (even though those actually get reDEactivated */
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reference_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_reference_reactivated, 0, "Cleaned pages reference reactivated");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_volatile_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_volatile_reactivated, 0, "Cleaned pages volatile reactivated");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_fault_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_fault_reactivated, 0, "Cleaned pages fault reactivated");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_commit_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_commit_reactivated, 0, "Cleaned pages commit reactivated");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_busy, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_busy, 0, "Cleaned pages busy (deactivated)");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_nolock, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_nolock, 0, "Cleaned pages no-lock (deactivated)");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_pageout_inactive_clean, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_used, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_pageout_inactive_used, 0, "");
+
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_inactive_dirty_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_inactive_dirty_internal, "");
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_inactive_dirty_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_inactive_dirty_external, "");
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_speculative_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_freed_speculative, "");
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_freed_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_freed_external, "");
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_freed_speculative, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_freed_speculative, "");
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_freed_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_freed_cleaned, "");
+
 
 /* counts of pages prefaulted when entering a memory object */
 extern int64_t vm_prefault_nb_pages, vm_prefault_nb_bailout;
 SYSCTL_QUAD(_vm, OID_AUTO, prefault_nb_pages, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_prefault_nb_pages, "");
 SYSCTL_QUAD(_vm, OID_AUTO, prefault_nb_bailout, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_prefault_nb_bailout, "");
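Most of the counters above are plain read-only integers, so a generic sysctlbyname(3) loop is enough to dump them; some are UINT and some ULONG, so a zeroed 64-bit buffer is used and the kernel reports the width it actually copied out. A hypothetical sketch (the exact set of nodes present depends on the kernel configuration):

#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	static const char *names[] = {
		"vm.page_pageable_internal_count",	/* UINT */
		"vm.page_pageable_external_count",	/* UINT */
		"vm.pageout_inactive_clean",		/* UINT */
		"vm.pageout_freed_cleaned",		/* ULONG */
	};
	for (unsigned i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		unsigned long long value = 0;	/* zeroed so a narrower (little-endian) value still prints correctly */
		size_t len = sizeof(value);
		if (sysctlbyname(names[i], &value, &len, NULL, 0) == 0)
			printf("%-40s %llu\n", names[i], value);
	}
	return 0;
}
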
 
+#if defined (__x86_64__)
+extern unsigned int vm_clump_promote_threshold;
+SYSCTL_UINT(_vm, OID_AUTO, vm_clump_promote_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_clump_promote_threshold, 0, "clump size threshold for promotes");
+#if DEVELOPMENT || DEBUG
+extern unsigned long vm_clump_stats[];
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats1, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[1], "free page allocations from clump of 1 page");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats2, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[2], "free page allocations from clump of 2 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats3, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[3], "free page allocations from clump of 3 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats4, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[4], "free page allocations from clump of 4 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats5, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[5], "free page allocations from clump of 5 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats6, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[6], "free page allocations from clump of 6 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats7, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[7], "free page allocations from clump of 7 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats8, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[8], "free page allocations from clump of 8 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats9, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[9], "free page allocations from clump of 9 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats10, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[10], "free page allocations from clump of 10 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats11, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[11], "free page allocations from clump of 11 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats12, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[12], "free page allocations from clump of 12 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats13, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[13], "free page allocations from clump of 13 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats14, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[14], "free page allocations from clump of 14 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats15, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[15], "free page allocations from clump of 15 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats16, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[16], "free page allocations from clump of 16 pages");
+extern unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_alloc, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_allocs, "free page allocations");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_inserts, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_inserts, "free page insertions");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_inrange, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_inrange, "free page insertions that are part of vm_pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_promotes, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_promotes, "pages promoted to head");
+#endif  /* if DEVELOPMENT || DEBUG */
+#endif  /* #if defined (__x86_64__) */
+
+#if CONFIG_SECLUDED_MEMORY
+
+SYSCTL_UINT(_vm, OID_AUTO, num_tasks_can_use_secluded_mem, CTLFLAG_RD | CTLFLAG_LOCKED, &num_tasks_can_use_secluded_mem, 0, "");
+extern unsigned int vm_page_secluded_target;
+extern unsigned int vm_page_secluded_count;
+extern unsigned int vm_page_secluded_count_free;
+extern unsigned int vm_page_secluded_count_inuse;
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_target, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_target, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_count_free, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_count_free, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_count_inuse, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_count_inuse, 0, "");
+
+extern struct vm_page_secluded_data vm_page_secluded;
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_eligible, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.eligible_for_secluded, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_success_free, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_success_free, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_success_other, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_success_other, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_failure_locked, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_failure_locked, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_failure_state, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_failure_state, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_failure_dirty, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_failure_dirty, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_for_iokit, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_for_iokit, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_for_iokit_success, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_for_iokit_success, 0, "");
+
+#endif /* CONFIG_SECLUDED_MEMORY */
+
 #include <kern/thread.h>
 #include <sys/user.h>
 
@@ -1789,3 +2292,105 @@ kas_info(struct proc *p,
        return 0;
 #endif /* !SECURE_KERNEL */
 }
+
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wcast-qual"
+#pragma clang diagnostic ignored "-Wunused-function"
+
+static void asserts() {
+       static_assert(sizeof(vm_min_kernel_address) == sizeof(unsigned long));
+       static_assert(sizeof(vm_max_kernel_address) == sizeof(unsigned long));
+}
+
+SYSCTL_ULONG(_vm, OID_AUTO, vm_min_kernel_address, CTLFLAG_RD, (unsigned long *) &vm_min_kernel_address, "");
+SYSCTL_ULONG(_vm, OID_AUTO, vm_max_kernel_address, CTLFLAG_RD, (unsigned long *) &vm_max_kernel_address, "");
+#pragma clang diagnostic pop
+
+extern uint32_t vm_page_pages;
+SYSCTL_UINT(_vm, OID_AUTO, pages, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pages, 0, "");
+
+extern uint32_t vm_page_busy_absent_skipped;
+SYSCTL_UINT(_vm, OID_AUTO, page_busy_absent_skipped, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_busy_absent_skipped, 0, "");
+
+#if (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG)
+extern int vm_footprint_suspend_allowed;
+SYSCTL_INT(_vm, OID_AUTO, footprint_suspend_allowed, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_footprint_suspend_allowed, 0, "");
+
+extern void pmap_footprint_suspend(vm_map_t map, boolean_t suspend);
+static int
+sysctl_vm_footprint_suspend SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+       int error = 0;
+       int new_value;
+
+       if (req->newptr == USER_ADDR_NULL) {
+               return 0;
+       }
+       error = SYSCTL_IN(req, &new_value, sizeof(int));
+       if (error) {
+               return error;
+       }
+       if (!vm_footprint_suspend_allowed) {
+               if (new_value != 0) {
+                       /* suspends are not allowed... */
+                       return 0;
+               }
+               /* ... but let resumes proceed */
+       }
+       DTRACE_VM2(footprint_suspend,
+                  vm_map_t, current_map(),
+                  int, new_value);
+
+       pmap_footprint_suspend(current_map(), new_value);
+
+       return 0;
+}
+SYSCTL_PROC(_vm, OID_AUTO, footprint_suspend,
+           CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_LOCKED|CTLFLAG_MASKED,
+           0, 0, &sysctl_vm_footprint_suspend, "I", "");
+#endif /* (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG) */
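vm.footprint_suspend is write-only (CTLFLAG_WR | CTLFLAG_MASKED): a non-zero write suspends footprint accounting for the calling task (when vm_footprint_suspend_allowed permits it) and zero resumes it. A hypothetical sketch for an arm DEVELOPMENT/DEBUG kernel:

#include <sys/sysctl.h>

int
main(void)
{
	int on = 1, off = 0;

	/* suspend footprint accounting for this task, then resume it */
	(void) sysctlbyname("vm.footprint_suspend", NULL, NULL, &on, sizeof(on));
	(void) sysctlbyname("vm.footprint_suspend", NULL, NULL, &off, sizeof(off));
	return 0;
}
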
+
+extern uint64_t vm_map_corpse_footprint_count;
+extern uint64_t vm_map_corpse_footprint_size_avg;
+extern uint64_t vm_map_corpse_footprint_size_max;
+extern uint64_t vm_map_corpse_footprint_full;
+extern uint64_t vm_map_corpse_footprint_no_buf;
+SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_count,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_count, "");
+SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_size_avg,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_size_avg, "");
+SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_size_max,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_size_max, "");
+SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_full,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_full, "");
+SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_no_buf,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_no_buf, "");
+
+#if PMAP_CS
+extern uint64_t vm_cs_defer_to_pmap_cs;
+extern uint64_t vm_cs_defer_to_pmap_cs_not;
+SYSCTL_QUAD(_vm, OID_AUTO, cs_defer_to_pmap_cs,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs, "");
+SYSCTL_QUAD(_vm, OID_AUTO, cs_defer_to_pmap_cs_not,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs_not, "");
+#endif /* PMAP_CS */
+
+extern uint64_t shared_region_pager_copied;
+extern uint64_t shared_region_pager_slid;
+extern uint64_t shared_region_pager_slid_error;
+extern uint64_t shared_region_pager_reclaimed;
+SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_copied,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_copied, "");
+SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_slid,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_slid, "");
+SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_slid_error,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_slid_error, "");
+SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_reclaimed,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_reclaimed, "");
+
+#if MACH_ASSERT
+extern int pmap_ledgers_panic_leeway;
+SYSCTL_INT(_vm, OID_AUTO, pmap_ledgers_panic_leeway, CTLFLAG_RW | CTLFLAG_LOCKED, &pmap_ledgers_panic_leeway, 0, "");
+#endif /* MACH_ASSERT */