]> git.saurik.com Git - apple/xnu.git/blobdiff - bsd/vm/vm_unix.c
xnu-4903.231.4.tar.gz
[apple/xnu.git] / bsd / vm / vm_unix.c
index 8048a6b8022ef4dd63dfdb736b96e88b73e405f7..d3109c5646d200dd91160196818ea497dd02d7c4 100644 (file)
@@ -1,5 +1,5 @@
 /*
 /*
- * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
 #include <security/mac_framework.h>
 #endif
 
 #include <security/mac_framework.h>
 #endif
 
+#if CONFIG_CSR
+#include <sys/csr.h>
+#endif /* CONFIG_CSR */
+
 int _shared_region_map_and_slide(struct proc*, int, unsigned int, struct shared_file_mapping_np*, uint32_t, user_addr_t, user_addr_t);
 int shared_region_copyin_mappings(struct proc*, user_addr_t, unsigned int, struct shared_file_mapping_np *);
 
 int _shared_region_map_and_slide(struct proc*, int, unsigned int, struct shared_file_mapping_np*, uint32_t, user_addr_t, user_addr_t);
 int shared_region_copyin_mappings(struct proc*, user_addr_t, unsigned int, struct shared_file_mapping_np *);
 
@@ -141,7 +145,32 @@ SYSCTL_PROC(_vm, OID_AUTO, kmem_alloc_contig, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOC
            0, 0, &sysctl_kmem_alloc_contig, "I", "");
 
 extern int vm_region_footprint;
            0, 0, &sysctl_kmem_alloc_contig, "I", "");
 
 extern int vm_region_footprint;
-SYSCTL_INT(_vm, OID_AUTO, region_footprint, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_region_footprint, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, region_footprint, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, &vm_region_footprint, 0, "");
+/*
+ * sysctl handler for vm.self_region_footprint (DEVELOPMENT || DEBUG only):
+ * reports, and on writes updates, the "region footprint" accounting mode
+ * of the calling task.  The old value is always emitted; a new value is
+ * consumed only when the caller supplied one.
+ */
+static int
+sysctl_vm_self_region_footprint SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+       int     error = 0;
+       int     value;
+
+       /* always report the current task's setting first */
+       value = task_self_region_footprint();
+       error = SYSCTL_OUT(req, &value, sizeof (int));
+       if (error) {
+               return error;
+       }
+
+       /* read-only request: done once the old value has been copied out */
+       if (!req->newptr) {
+               return 0;
+       }
+
+       /* write request: copy in and apply the new mode for this task */
+       error = SYSCTL_IN(req, &value, sizeof (int));
+       if (error) {
+               return (error);
+       }
+       task_self_region_footprint_set(value);
+       return 0;
+}
+SYSCTL_PROC(_vm, OID_AUTO, self_region_footprint, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_LOCKED|CTLFLAG_MASKED, 0, 0, &sysctl_vm_self_region_footprint, "I", "");
 
 #endif /* DEVELOPMENT || DEBUG */
 
 
 #endif /* DEVELOPMENT || DEBUG */
 
@@ -251,12 +280,6 @@ extern int allow_stack_exec, allow_data_exec;
 SYSCTL_INT(_vm, OID_AUTO, allow_stack_exec, CTLFLAG_RW | CTLFLAG_LOCKED, &allow_stack_exec, 0, "");
 SYSCTL_INT(_vm, OID_AUTO, allow_data_exec, CTLFLAG_RW | CTLFLAG_LOCKED, &allow_data_exec, 0, "");
 
 SYSCTL_INT(_vm, OID_AUTO, allow_stack_exec, CTLFLAG_RW | CTLFLAG_LOCKED, &allow_stack_exec, 0, "");
 SYSCTL_INT(_vm, OID_AUTO, allow_data_exec, CTLFLAG_RW | CTLFLAG_LOCKED, &allow_data_exec, 0, "");
 
-#if __arm64__
-extern int fourk_binary_compatibility_unsafe;
-extern int fourk_binary_compatibility_allow_wx;
-SYSCTL_INT(_vm, OID_AUTO, fourk_binary_compatibility_unsafe, CTLFLAG_RW | CTLFLAG_LOCKED, &fourk_binary_compatibility_unsafe, 0, "");
-SYSCTL_INT(_vm, OID_AUTO, fourk_binary_compatibility_allow_wx, CTLFLAG_RW | CTLFLAG_LOCKED, &fourk_binary_compatibility_allow_wx, 0, "");
-#endif /* __arm64__ */
 #endif /* DEVELOPMENT || DEBUG */
 
 static const char *prot_values[] = {
 #endif /* DEVELOPMENT || DEBUG */
 
 static const char *prot_values[] = {
@@ -305,7 +328,18 @@ static char scdir_path[] = "/System/Library/Caches/com.apple.dyld/";
 #endif
 
 #ifndef SECURE_KERNEL
 #endif
 
 #ifndef SECURE_KERNEL
-SYSCTL_INT(_vm, OID_AUTO, enforce_shared_cache_dir, CTLFLAG_RW | CTLFLAG_LOCKED, &scdir_enforce, 0, "");
+/*
+ * sysctl handler for vm.enforce_shared_cache_dir: toggles scdir_enforce.
+ * Changing the flag weakens a code-integrity restriction, so writes are
+ * gated on SIP's filesystem protections being lifted.  Reads fall through
+ * to the generic int handler unconditionally — the log message and the
+ * security rationale apply only to modification attempts, so the current
+ * value remains observable regardless of CSR state.
+ */
+static int sysctl_scdir_enforce SYSCTL_HANDLER_ARGS
+{
+#if CONFIG_CSR
+       /* only gate writes; a NULL newptr means this is a read */
+       if (req->newptr && csr_check(CSR_ALLOW_UNRESTRICTED_FS) != 0) {
+               printf("Failed attempt to set vm.enforce_shared_cache_dir sysctl\n");
+               return EPERM;
+       }
+#endif /* CONFIG_CSR */
+       return sysctl_handle_int(oidp, arg1, arg2, req);
+}
+
+SYSCTL_PROC(_vm, OID_AUTO, enforce_shared_cache_dir, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &scdir_enforce, 0, sysctl_scdir_enforce, "I", "");
 #endif
 
 /* These log rate throttling state variables aren't thread safe, but
 #endif
 
 /* These log rate throttling state variables aren't thread safe, but
@@ -768,8 +802,9 @@ task_for_pid(
        user_addr_t             task_addr = args->t;
        proc_t                  p = PROC_NULL;
        task_t                  t1 = TASK_NULL;
        user_addr_t             task_addr = args->t;
        proc_t                  p = PROC_NULL;
        task_t                  t1 = TASK_NULL;
+       task_t                  task = TASK_NULL;
        mach_port_name_t        tret = MACH_PORT_NULL;
        mach_port_name_t        tret = MACH_PORT_NULL;
-       ipc_port_t              tfpport;
+       ipc_port_t              tfpport = MACH_PORT_NULL;
        void * sright;
        int error = 0;
 
        void * sright;
        int error = 0;
 
@@ -807,60 +842,86 @@ task_for_pid(
                goto tfpout;
        }
 
                goto tfpout;
        }
 
-       if (p->task != TASK_NULL) {
-               /* If we aren't root and target's task access port is set... */
-               if (!kauth_cred_issuser(kauth_cred_get()) &&
-                       p != current_proc() &&
-                       (task_get_task_access_port(p->task, &tfpport) == 0) &&
-                       (tfpport != IPC_PORT_NULL)) {
+       if (p->task == TASK_NULL) {
+               error = KERN_SUCCESS;
+               goto tfpout;
+       }
 
 
-                       if (tfpport == IPC_PORT_DEAD) {
-                               error = KERN_PROTECTION_FAILURE;
-                               goto tfpout;
-                       }
+#if CONFIG_MACF
+       error = mac_proc_check_get_task(kauth_cred_get(), p);
+       if (error) {
+               error = KERN_FAILURE;
+               goto tfpout;
+       }
+#endif
 
 
-                       /* Call up to the task access server */
-                       error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
+       /* Grab a task reference since the proc ref might be dropped if an upcall to task access server is made */
+       task = p->task;
+       task_reference(task);
 
 
-                       if (error != MACH_MSG_SUCCESS) {
-                               if (error == MACH_RCV_INTERRUPTED)
-                                       error = KERN_ABORTED;
-                               else
-                                       error = KERN_FAILURE;
-                               goto tfpout;
-                       }
-               }
-#if CONFIG_MACF
-               error = mac_proc_check_get_task(kauth_cred_get(), p);
-               if (error) {
-                       error = KERN_FAILURE;
+       /* If we aren't root and target's task access port is set... */
+       if (!kauth_cred_issuser(kauth_cred_get()) &&
+               p != current_proc() &&
+               (task_get_task_access_port(task, &tfpport) == 0) &&
+               (tfpport != IPC_PORT_NULL)) {
+
+               if (tfpport == IPC_PORT_DEAD) {
+                       error = KERN_PROTECTION_FAILURE;
                        goto tfpout;
                }
                        goto tfpout;
                }
-#endif
 
 
-               /* Grant task port access */
-               task_reference(p->task);
-               extmod_statistics_incr_task_for_pid(p->task);
+               /*
+                * Drop the proc_find proc ref before making an upcall
+                * to taskgated, since holding a proc_find
+                * ref while making an upcall can cause deadlock.
+                */
+               proc_rele(p);
+               p = PROC_NULL;
 
 
-               sright = (void *) convert_task_to_port(p->task);
+               /* Call up to the task access server */
+               error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
 
 
-               /* Check if the task has been corpsified */
-               if (is_corpsetask(p->task)) {
-                       ipc_port_release_send(sright);
-                       error = KERN_FAILURE;
+               if (error != MACH_MSG_SUCCESS) {
+                       if (error == MACH_RCV_INTERRUPTED)
+                               error = KERN_ABORTED;
+                       else
+                               error = KERN_FAILURE;
                        goto tfpout;
                }
                        goto tfpout;
                }
+       }
+
+       /* Grant task port access */
+       extmod_statistics_incr_task_for_pid(task);
+       sright = (void *) convert_task_to_port(task);
+
+       /* Check if the task has been corpsified */
+       if (is_corpsetask(task)) {
+               /* task ref consumed by convert_task_to_port */
+               task = TASK_NULL;
+               ipc_port_release_send(sright);
+               error = KERN_FAILURE;
+               goto tfpout;
+       }
+
+       /* task ref consumed by convert_task_to_port */
+       task = TASK_NULL;
+       tret = ipc_port_copyout_send(
+                       sright,
+                       get_task_ipcspace(current_task()));
 
 
-               tret = ipc_port_copyout_send(
-                               sright, 
-                               get_task_ipcspace(current_task()));
-       } 
        error = KERN_SUCCESS;
 
 tfpout:
        task_deallocate(t1);
        AUDIT_ARG(mach_port2, tret);
        (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
        error = KERN_SUCCESS;
 
 tfpout:
        task_deallocate(t1);
        AUDIT_ARG(mach_port2, tret);
        (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
+
+       if (tfpport != IPC_PORT_NULL) {
+               ipc_port_release_send(tfpport);
+       }
+       if (task != TASK_NULL) {
+               task_deallocate(task);
+       }
        if (p != PROC_NULL)
                proc_rele(p);
        AUDIT_MACH_SYSCALL_EXIT(error);
        if (p != PROC_NULL)
                proc_rele(p);
        AUDIT_MACH_SYSCALL_EXIT(error);
@@ -1201,38 +1262,90 @@ out:
 #endif /* CONFIG_EMBEDDED */
 
 #if SOCKETS
 #endif /* CONFIG_EMBEDDED */
 
 #if SOCKETS
+/*
+ * Memorystatus callout for the networking stack: walk the process's open
+ * file descriptors and notify NECP-backed descriptors of the memstatus
+ * change so they can react (e.g. shed memory).  Always returns 1.
+ *
+ * Locking contract (asserted below): caller holds neither the proc list
+ * lock nor the proc lock, and holds a reference on p for the duration.
+ */
+int
+networking_memstatus_callout(proc_t p, uint32_t status)
+{
+       struct filedesc *fdp;
+       int i;
+
+       /*
+        * proc list lock NOT held
+        * proc lock NOT held
+        * a reference on the proc has been held / shall be dropped by the caller.
+        */
+       LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
+       LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_NOTOWNED);
+
+       proc_fdlock(p);
+       fdp = p->p_fd;
+       for (i = 0; i < fdp->fd_nfiles; i++) {
+               struct fileproc *fp;
+
+               fp = fdp->fd_ofiles[i];
+               /* skip empty slots and slots reserved mid-open */
+               if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
+                       continue;
+               }
+               switch (FILEGLOB_DTYPE(fp->f_fglob)) {
+#if NECP
+               case DTYPE_NETPOLICY:
+                       /* hand the NECP fd its memstatus notification */
+                       necp_fd_memstatus(p, status,
+                           (struct necp_fd_data *)fp->f_fglob->fg_data);
+                       break;
+#endif /* NECP */
+               default:
+                       break;
+               }
+       }
+       proc_fdunlock(p);
+
+       return (1);
+}
+
+
 static int
 static int
-shutdown_sockets_callout(proc_t p, void *arg)
+networking_defunct_callout(proc_t p, void *arg)
 {
        struct pid_shutdown_sockets_args *args = arg;
        int pid = args->pid;
        int level = args->level;
        struct filedesc *fdp;
 {
        struct pid_shutdown_sockets_args *args = arg;
        int pid = args->pid;
        int level = args->level;
        struct filedesc *fdp;
-       struct fileproc *fp;
        int i;
 
        proc_fdlock(p);
        fdp = p->p_fd;
        for (i = 0; i < fdp->fd_nfiles; i++) {
        int i;
 
        proc_fdlock(p);
        fdp = p->p_fd;
        for (i = 0; i < fdp->fd_nfiles; i++) {
-               fp = fdp->fd_ofiles[i];
+               struct fileproc *fp = fdp->fd_ofiles[i];
+               struct fileglob *fg;
+
                if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
                        continue;
                }
                if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
                        continue;
                }
-               if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) {
-                       struct socket *so = (struct socket *)fp->f_fglob->fg_data;
+
+               fg = fp->f_fglob;
+               switch (FILEGLOB_DTYPE(fg)) {
+               case DTYPE_SOCKET: {
+                       struct socket *so = (struct socket *)fg->fg_data;
                        if (p->p_pid == pid || so->last_pid == pid || 
                            ((so->so_flags & SOF_DELEGATED) && so->e_pid == pid)) {
                                /* Call networking stack with socket and level */
                                (void) socket_defunct(p, so, level);
                        }
                        if (p->p_pid == pid || so->last_pid == pid || 
                            ((so->so_flags & SOF_DELEGATED) && so->e_pid == pid)) {
                                /* Call networking stack with socket and level */
                                (void) socket_defunct(p, so, level);
                        }
+                       break;
                }
 #if NECP
                }
 #if NECP
-               else if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_NETPOLICY &&
-                   p->p_pid == pid) {
-                       necp_defunct_client(p, fp);
-               }
+               case DTYPE_NETPOLICY:
+                       /* first pass: defunct necp and get stats for ntstat */
+                       if (p->p_pid == pid) {
+                               necp_fd_defunct(p,
+                                   (struct necp_fd_data *)fg->fg_data);
+                       }
+                       break;
 #endif /* NECP */
 #endif /* NECP */
+               default:
+                       break;
+               }
        }
        }
+
        proc_fdunlock(p);
 
        return (PROC_RETURNED);
        proc_fdunlock(p);
 
        return (PROC_RETURNED);
@@ -1271,7 +1384,8 @@ pid_shutdown_sockets(struct proc *p __unused, struct pid_shutdown_sockets_args *
                goto out;
        }
 
                goto out;
        }
 
-       proc_iterate(PROC_ALLPROCLIST | PROC_NOWAITTRANS, shutdown_sockets_callout, args, NULL, NULL);
+       proc_iterate(PROC_ALLPROCLIST | PROC_NOWAITTRANS,
+           networking_defunct_callout, args, NULL, NULL);
 
 out:
        if (targetproc != PROC_NULL)
 
 out:
        if (targetproc != PROC_NULL)
@@ -1654,7 +1768,7 @@ _shared_region_map_and_slide(
        }
 
        /* check that the mappings are properly covered by code signatures */
        }
 
        /* check that the mappings are properly covered by code signatures */
-       if (!cs_enforcement(NULL)) {
+       if (!cs_system_enforcement()) {
                /* code signing is not enforced: no need to check */
        } else for (i = 0; i < mappings_count; i++) {
                if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
                /* code signing is not enforced: no need to check */
        } else for (i = 0; i < mappings_count; i++) {
                if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
@@ -1685,7 +1799,7 @@ _shared_region_map_and_slide(
        }
 
        /* get the process's shared region (setup in vm_map_exec()) */
        }
 
        /* get the process's shared region (setup in vm_map_exec()) */
-       shared_region = vm_shared_region_get(current_task());
+       shared_region = vm_shared_region_trim_and_get(current_task());
        if (shared_region == NULL) {
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map(%p:'%s'): "
        if (shared_region == NULL) {
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map(%p:'%s'): "
@@ -1693,6 +1807,7 @@ _shared_region_map_and_slide(
                         (void *)VM_KERNEL_ADDRPERM(current_thread()),
                         p->p_pid, p->p_comm,
                         (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name));
                         (void *)VM_KERNEL_ADDRPERM(current_thread()),
                         p->p_pid, p->p_comm,
                         (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name));
+               error = EINVAL;
                goto done;
        }
 
                goto done;
        }
 
@@ -1865,9 +1980,8 @@ extern unsigned int       vm_page_free_target;
 SYSCTL_INT(_vm, OID_AUTO, vm_page_free_target, CTLFLAG_RD | CTLFLAG_LOCKED, 
                   &vm_page_free_target, 0, "Pageout daemon free target");
 
 SYSCTL_INT(_vm, OID_AUTO, vm_page_free_target, CTLFLAG_RD | CTLFLAG_LOCKED, 
                   &vm_page_free_target, 0, "Pageout daemon free target");
 
-extern unsigned int    vm_memory_pressure;
 SYSCTL_INT(_vm, OID_AUTO, memory_pressure, CTLFLAG_RD | CTLFLAG_LOCKED,
 SYSCTL_INT(_vm, OID_AUTO, memory_pressure, CTLFLAG_RD | CTLFLAG_LOCKED,
-          &vm_memory_pressure, 0, "Memory pressure indicator");
+          &vm_pageout_state.vm_memory_pressure, 0, "Memory pressure indicator");
 
 static int
 vm_ctl_page_free_wanted SYSCTL_HANDLER_ARGS
 
 static int
 vm_ctl_page_free_wanted SYSCTL_HANDLER_ARGS
@@ -1890,9 +2004,42 @@ extern unsigned int      vm_page_purgeable_wired_count;
 SYSCTL_INT(_vm, OID_AUTO, page_purgeable_wired_count, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_purgeable_wired_count, 0, "Wired purgeable page count");
 
 SYSCTL_INT(_vm, OID_AUTO, page_purgeable_wired_count, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_purgeable_wired_count, 0, "Wired purgeable page count");
 
-extern unsigned int    vm_pageout_purged_objects;
+#if DEVELOPMENT || DEBUG
+extern uint64_t get_pages_grabbed_count(void);
+
+/*
+ * Read-only sysctl handler for vm.pages_grabbed (DEVELOPMENT || DEBUG):
+ * snapshots the 64-bit total of pages grabbed and copies it out.
+ * A handler is needed (vs. a plain SYSCTL_QUAD) because the count is
+ * produced by get_pages_grabbed_count() rather than read from a variable.
+ */
+static int
+pages_grabbed SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+       uint64_t value = get_pages_grabbed_count();
+       return SYSCTL_OUT(req, &value, sizeof(value));
+}
+
+SYSCTL_PROC(_vm, OID_AUTO, pages_grabbed, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+           0, 0, &pages_grabbed, "QU", "Total pages grabbed");
+SYSCTL_ULONG(_vm, OID_AUTO, pages_freed, CTLFLAG_RD | CTLFLAG_LOCKED,
+            &vm_pageout_vminfo.vm_page_pages_freed, "Total pages freed");
+
 SYSCTL_INT(_vm, OID_AUTO, pageout_purged_objects, CTLFLAG_RD | CTLFLAG_LOCKED,
 SYSCTL_INT(_vm, OID_AUTO, pageout_purged_objects, CTLFLAG_RD | CTLFLAG_LOCKED,
-          &vm_pageout_purged_objects, 0, "System purged object count");
+          &vm_pageout_debug.vm_pageout_purged_objects, 0, "System purged object count");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_busy, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_busy, 0, "Cleaned pages busy (deactivated)");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_nolock, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_nolock, 0, "Cleaned pages no-lock (deactivated)");
+
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_volatile_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_volatile_reactivated, 0, "Cleaned pages volatile reactivated");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_fault_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_fault_reactivated, 0, "Cleaned pages fault reactivated");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_reactivated, 0, "Cleaned pages reactivated"); /* sum of all reactivated AND busy and nolock (even though those actually get reDEactivated */
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_vminfo.vm_pageout_freed_cleaned, "Cleaned pages freed");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reference_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_reference_reactivated, 0, "Cleaned pages reference reactivated");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_enqueued_cleaned, 0, ""); /* sum of next two */
+#endif
 
 extern int madvise_free_debug;
 SYSCTL_INT(_vm, OID_AUTO, madvise_free_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
 
 extern int madvise_free_debug;
 SYSCTL_INT(_vm, OID_AUTO, madvise_free_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
@@ -1944,34 +2091,16 @@ SYSCTL_UINT(_vm, OID_AUTO, page_pageable_internal_count, CTLFLAG_RD | CTLFLAG_LO
 SYSCTL_UINT(_vm, OID_AUTO, page_pageable_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pageable_external_count, 0, "");
 
 /* pageout counts */
 SYSCTL_UINT(_vm, OID_AUTO, page_pageable_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pageable_external_count, 0, "");
 
 /* pageout counts */
-extern unsigned int vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external, vm_pageout_inactive_clean, vm_pageout_speculative_clean, vm_pageout_inactive_used;
-extern unsigned int vm_pageout_freed_from_inactive_clean, vm_pageout_freed_from_speculative;
-SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_dirty_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_dirty_internal, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_dirty_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_dirty_external, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_clean, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_speculative_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_speculative_clean, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_used, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_used, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_inactive_clean, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_speculative, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_speculative, 0, "");
-
-extern unsigned int vm_pageout_freed_from_cleaned;
-SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_cleaned, 0, "");
-
-/* counts of pages entering the cleaned queue */
-extern unsigned int vm_pageout_enqueued_cleaned, vm_pageout_enqueued_cleaned_from_inactive_dirty;
-SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned, 0, ""); /* sum of next two */
-SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned_from_inactive_dirty, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned_from_inactive_dirty, 0, "");
-
-/* counts of pages leaving the cleaned queue */
-extern unsigned int vm_pageout_cleaned_reclaimed, vm_pageout_cleaned_reactivated, vm_pageout_cleaned_reference_reactivated, vm_pageout_cleaned_volatile_reactivated, vm_pageout_cleaned_fault_reactivated, vm_pageout_cleaned_commit_reactivated, vm_pageout_cleaned_busy, vm_pageout_cleaned_nolock;
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_reclaimed, 0, "Cleaned pages reclaimed");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_reactivated, 0, "Cleaned pages reactivated"); /* sum of all reactivated AND busy and nolock (even though those actually get reDEactivated */
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reference_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_reference_reactivated, 0, "Cleaned pages reference reactivated");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_volatile_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_volatile_reactivated, 0, "Cleaned pages volatile reactivated");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_fault_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_fault_reactivated, 0, "Cleaned pages fault reactivated");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_commit_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_commit_reactivated, 0, "Cleaned pages commit reactivated");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_busy, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_busy, 0, "Cleaned pages busy (deactivated)");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_nolock, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_nolock, 0, "Cleaned pages no-lock (deactivated)");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_pageout_inactive_clean, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_used, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_pageout_inactive_used, 0, "");
+
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_inactive_dirty_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_inactive_dirty_internal, "");
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_inactive_dirty_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_inactive_dirty_external, "");
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_speculative_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_freed_speculative, "");
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_freed_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_freed_external, "");
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_freed_speculative, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_freed_speculative, "");
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_freed_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_freed_cleaned, "");
+
 
 /* counts of pages prefaulted when entering a memory object */
 extern int64_t vm_prefault_nb_pages, vm_prefault_nb_bailout;
 
 /* counts of pages prefaulted when entering a memory object */
 extern int64_t vm_prefault_nb_pages, vm_prefault_nb_bailout;
@@ -2029,9 +2158,6 @@ SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_failure_dirty, CTLFLAG_RD | CTLFLA
 SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_for_iokit, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_for_iokit, 0, "");
 SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_for_iokit_success, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_for_iokit_success, 0, "");
 
 SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_for_iokit, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_for_iokit, 0, "");
 SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_for_iokit_success, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_for_iokit_success, 0, "");
 
-extern uint64_t vm_pageout_secluded_burst_count;
-SYSCTL_QUAD(_vm, OID_AUTO, pageout_secluded_burst_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_secluded_burst_count, "");
-
 #endif /* CONFIG_SECLUDED_MEMORY */
 
 #include <kern/thread.h>
 #endif /* CONFIG_SECLUDED_MEMORY */
 
 #include <kern/thread.h>
@@ -2184,7 +2310,13 @@ SYSCTL_ULONG(_vm, OID_AUTO, vm_max_kernel_address, CTLFLAG_RD, (unsigned long *)
 extern uint32_t vm_page_pages;
 SYSCTL_UINT(_vm, OID_AUTO, pages, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pages, 0, "");
 
 extern uint32_t vm_page_pages;
 SYSCTL_UINT(_vm, OID_AUTO, pages, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pages, 0, "");
 
+extern uint32_t vm_page_busy_absent_skipped;
+SYSCTL_UINT(_vm, OID_AUTO, page_busy_absent_skipped, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_busy_absent_skipped, 0, "");
+
 #if (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG)
 #if (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG)
+extern int vm_footprint_suspend_allowed;
+SYSCTL_INT(_vm, OID_AUTO, footprint_suspend_allowed, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_footprint_suspend_allowed, 0, "");
+
 extern void pmap_footprint_suspend(vm_map_t map, boolean_t suspend);
 static int
 sysctl_vm_footprint_suspend SYSCTL_HANDLER_ARGS
 extern void pmap_footprint_suspend(vm_map_t map, boolean_t suspend);
 static int
 sysctl_vm_footprint_suspend SYSCTL_HANDLER_ARGS
@@ -2200,10 +2332,65 @@ sysctl_vm_footprint_suspend SYSCTL_HANDLER_ARGS
        if (error) {
                return error;
        }
        if (error) {
                return error;
        }
+       if (!vm_footprint_suspend_allowed) {
+               if (new_value != 0) {
+                       /* suspends are not allowed... */
+                       return 0;
+               }
+               /* ... but let resumes proceed */
+       }
+       DTRACE_VM2(footprint_suspend,
+                  vm_map_t, current_map(),
+                  int, new_value);
+
        pmap_footprint_suspend(current_map(), new_value);
        pmap_footprint_suspend(current_map(), new_value);
+
        return 0;
 }
 SYSCTL_PROC(_vm, OID_AUTO, footprint_suspend,
            CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_LOCKED|CTLFLAG_MASKED,
            0, 0, &sysctl_vm_footprint_suspend, "I", "");
 #endif /* (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG) */
        return 0;
 }
 SYSCTL_PROC(_vm, OID_AUTO, footprint_suspend,
            CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_LOCKED|CTLFLAG_MASKED,
            0, 0, &sysctl_vm_footprint_suspend, "I", "");
 #endif /* (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG) */
+
+extern uint64_t vm_map_corpse_footprint_count;
+extern uint64_t vm_map_corpse_footprint_size_avg;
+extern uint64_t vm_map_corpse_footprint_size_max;
+extern uint64_t vm_map_corpse_footprint_full;
+extern uint64_t vm_map_corpse_footprint_no_buf;
+SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_count,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_count, "");
+SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_size_avg,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_size_avg, "");
+SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_size_max,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_size_max, "");
+SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_full,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_full, "");
+SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_no_buf,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_no_buf, "");
+
+#if PMAP_CS
+extern uint64_t vm_cs_defer_to_pmap_cs;
+extern uint64_t vm_cs_defer_to_pmap_cs_not;
+SYSCTL_QUAD(_vm, OID_AUTO, cs_defer_to_pmap_cs,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs, "");
+SYSCTL_QUAD(_vm, OID_AUTO, cs_defer_to_pmap_cs_not,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs_not, "");
+#endif /* PMAP_CS */
+
+extern uint64_t shared_region_pager_copied;
+extern uint64_t shared_region_pager_slid;
+extern uint64_t shared_region_pager_slid_error;
+extern uint64_t shared_region_pager_reclaimed;
+SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_copied,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_copied, "");
+SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_slid,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_slid, "");
+SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_slid_error,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_slid_error, "");
+SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_reclaimed,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_reclaimed, "");
+
+#if MACH_ASSERT
+extern int pmap_ledgers_panic_leeway;
+SYSCTL_INT(_vm, OID_AUTO, pmap_ledgers_panic_leeway, CTLFLAG_RW | CTLFLAG_LOCKED, &pmap_ledgers_panic_leeway, 0, "");
+#endif /* MACH_ASSERT */