diff --git a/bsd/vm/vm_unix.c b/bsd/vm/vm_unix.c
index 1aa66039943dcf2be8eb4d9f1aba8b454bc801c6..d3109c5646d200dd91160196818ea497dd02d7c4 100644
--- a/bsd/vm/vm_unix.c
+++ b/bsd/vm/vm_unix.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
  * is included in support of clause 2.2 (b) of the Apple Public License,
  * Version 2.0.
  */
-
-#include <meta_features.h>
+#include <vm/vm_options.h>
 
 #include <kern/task.h>
 #include <kern/thread.h>
 #include <kern/debug.h>
-#include <kern/lock.h>
 #include <kern/extmod_statistics.h>
 #include <mach/mach_traps.h>
 #include <mach/port.h>
+#include <mach/sdt.h>
 #include <mach/task.h>
 #include <mach/task_access.h>
 #include <mach/task_special_ports.h>
 #include <sys/cprotect.h>
 #include <sys/kpi_socket.h>
 #include <sys/kas_info.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#if NECP
+#include <net/necp.h>
+#endif /* NECP */
 
 #include <security/audit/audit.h>
 #include <security/mac.h>
@@ -88,8 +92,6 @@
 #include <vm/vm_kern.h>
 #include <vm/vm_pageout.h>
 
-#include <machine/spl.h>
-
 #include <mach/shared_region.h>
 #include <vm/vm_shared_region.h>
 
 
 #include <sys/kern_memorystatus.h>
 
+#if CONFIG_MACF
+#include <security/mac_framework.h>
+#endif
+
+#if CONFIG_CSR
+#include <sys/csr.h>
+#endif /* CONFIG_CSR */
 
 int _shared_region_map_and_slide(struct proc*, int, unsigned int, struct shared_file_mapping_np*, uint32_t, user_addr_t, user_addr_t);
 int shared_region_copyin_mappings(struct proc*, user_addr_t, unsigned int, struct shared_file_mapping_np *);
 
-SYSCTL_INT(_vm, OID_AUTO, vm_debug_events, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_debug_events, 0, "");
+#if VM_MAP_DEBUG_APPLE_PROTECT
+SYSCTL_INT(_vm, OID_AUTO, map_debug_apple_protect, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_map_debug_apple_protect, 0, "");
+#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
+
+#if VM_MAP_DEBUG_FOURK
+SYSCTL_INT(_vm, OID_AUTO, map_debug_fourk, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_map_debug_fourk, 0, "");
+#endif /* VM_MAP_DEBUG_FOURK */
+
+#if DEVELOPMENT || DEBUG
+
+static int
+sysctl_kmem_alloc_contig SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+       vm_offset_t     kaddr;
+       kern_return_t   kr;
+       int     error = 0;
+       int     size = 0;
+
+       error = sysctl_handle_int(oidp, &size, 0, req);
+       if (error || !req->newptr)
+               return (error);
+
+       kr = kmem_alloc_contig(kernel_map, &kaddr, (vm_size_t)size, 0, 0, 0, 0, VM_KERN_MEMORY_IOKIT);
+
+       if (kr == KERN_SUCCESS)
+               kmem_free(kernel_map, kaddr, size);
+
+       return error;
+}
 
+SYSCTL_PROC(_vm, OID_AUTO, kmem_alloc_contig, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED,
+           0, 0, &sysctl_kmem_alloc_contig, "I", "");
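On DEVELOPMENT or DEBUG kernels, writing a size to this masked, write-only OID exercises the contiguous-allocation path once and immediately frees the result. A minimal userspace trigger, assuming the usual sysctlbyname(3) interface (illustrative sketch, not part of this diff):

#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int size = 2 * 1024 * 1024;	/* ask the handler for a 2 MB contiguous range */

	/* the kernel handler above allocates, then frees, the range */
	if (sysctlbyname("vm.kmem_alloc_contig", NULL, NULL, &size, sizeof(size)) != 0)
		perror("vm.kmem_alloc_contig");
	return 0;
}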
 
+extern int vm_region_footprint;
+SYSCTL_INT(_vm, OID_AUTO, region_footprint, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, &vm_region_footprint, 0, "");
+static int
+sysctl_vm_self_region_footprint SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+       int     error = 0;
+       int     value;
+
+       value = task_self_region_footprint();
+       error = SYSCTL_OUT(req, &value, sizeof (int));
+       if (error) {
+               return error;
+       }
+
+       if (!req->newptr) {
+               return 0;
+       }
+
+       error = SYSCTL_IN(req, &value, sizeof (int));
+       if (error) {
+               return (error);
+       }
+       task_self_region_footprint_set(value);
+       return 0;
+}
+SYSCTL_PROC(_vm, OID_AUTO, self_region_footprint, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_LOCKED|CTLFLAG_MASKED, 0, 0, &sysctl_vm_self_region_footprint, "I", "");
+
+#endif /* DEVELOPMENT || DEBUG */
+
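Because the OID above is CTLFLAG_ANYBODY, an unprivileged test can toggle footprint reporting for its own task. A sketch, assuming a DEVELOPMENT || DEBUG kernel exposing vm.self_region_footprint as declared above (illustrative only):

#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int on = 1, cur = 0;
	size_t len = sizeof(cur);

	/* enable footprint-style accounting for this task's regions */
	if (sysctlbyname("vm.self_region_footprint", NULL, NULL, &on, sizeof(on)) != 0)
		perror("set self_region_footprint");

	/* read the setting back through the same handler */
	if (sysctlbyname("vm.self_region_footprint", &cur, &len, NULL, 0) == 0)
		printf("self_region_footprint = %d\n", cur);
	return 0;
}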
+
+#if CONFIG_EMBEDDED
+
+#if DEVELOPMENT || DEBUG
+extern int panic_on_unsigned_execute;
+SYSCTL_INT(_vm, OID_AUTO, panic_on_unsigned_execute, CTLFLAG_RW | CTLFLAG_LOCKED, &panic_on_unsigned_execute, 0, "");
+#endif /* DEVELOPMENT || DEBUG */
+
+extern int log_executable_mem_entry;
+extern int cs_executable_create_upl;
+extern int cs_executable_mem_entry;
+extern int cs_executable_wire;
+SYSCTL_INT(_vm, OID_AUTO, log_executable_mem_entry, CTLFLAG_RD | CTLFLAG_LOCKED, &log_executable_mem_entry, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, cs_executable_create_upl, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_create_upl, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, cs_executable_mem_entry, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_mem_entry, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, cs_executable_wire, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_wire, 0, "");
+#endif /* CONFIG_EMBEDDED */
+
+#if DEVELOPMENT || DEBUG
+extern int radar_20146450;
+SYSCTL_INT(_vm, OID_AUTO, radar_20146450, CTLFLAG_RW | CTLFLAG_LOCKED, &radar_20146450, 0, "");
+
+extern int macho_printf;
+SYSCTL_INT(_vm, OID_AUTO, macho_printf, CTLFLAG_RW | CTLFLAG_LOCKED, &macho_printf, 0, "");
+
+extern int apple_protect_pager_data_request_debug;
+SYSCTL_INT(_vm, OID_AUTO, apple_protect_pager_data_request_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &apple_protect_pager_data_request_debug, 0, "");
+
+#if __arm__ || __arm64__
+/* These are meant to support the page table accounting unit test. */
+extern unsigned int arm_hardware_page_size;
+extern unsigned int arm_pt_desc_size;
+extern unsigned int arm_pt_root_size;
+extern unsigned int free_page_size_tt_count;
+extern unsigned int free_two_page_size_tt_count;
+extern unsigned int free_tt_count;
+extern unsigned int inuse_user_tteroot_count;
+extern unsigned int inuse_kernel_tteroot_count;
+extern unsigned int inuse_user_ttepages_count;
+extern unsigned int inuse_kernel_ttepages_count;
+extern unsigned int inuse_user_ptepages_count;
+extern unsigned int inuse_kernel_ptepages_count;
+SYSCTL_UINT(_vm, OID_AUTO, native_hw_pagesize, CTLFLAG_RD | CTLFLAG_LOCKED, &arm_hardware_page_size, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, arm_pt_desc_size, CTLFLAG_RD | CTLFLAG_LOCKED, &arm_pt_desc_size, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, arm_pt_root_size, CTLFLAG_RD | CTLFLAG_LOCKED, &arm_pt_root_size, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, free_1page_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &free_page_size_tt_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, free_2page_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &free_two_page_size_tt_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, free_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &free_tt_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, user_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_user_tteroot_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, kernel_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_kernel_tteroot_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, user_tte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_user_ttepages_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, kernel_tte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_kernel_ttepages_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, user_pte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_user_ptepages_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, kernel_pte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_kernel_ptepages_count, 0, "");
+#endif /* __arm__ || __arm64__ */
+
+#if __arm64__
+extern int fourk_pager_data_request_debug;
+SYSCTL_INT(_vm, OID_AUTO, fourk_pager_data_request_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &fourk_pager_data_request_debug, 0, "");
+#endif /* __arm64__ */
+#endif /* DEVELOPMENT || DEBUG */
+
+SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_compressor, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_compressor, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_compressor_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_compressor_pages, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_terminate, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_terminate, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_terminate_failure, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_terminate_failure, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_should_cow_but_wired, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.should_cow_but_wired, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_extra_cow, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_extra_cow, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_extra_cow_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_extra_cow_pages, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_lookup_failure_write, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_lookup_failure_write, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_lookup_failure_copy, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_lookup_failure_copy, 0, "");
+#if VM_SCAN_FOR_SHADOW_CHAIN
+static int vm_shadow_max_enabled = 0;    /* Disabled by default */
+extern int proc_shadow_max(void);
+static int
+vm_shadow_max SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+       int value = 0;
+
+       if (vm_shadow_max_enabled)
+               value = proc_shadow_max();
+
+       return SYSCTL_OUT(req, &value, sizeof(value));
+}
+SYSCTL_PROC(_vm, OID_AUTO, vm_shadow_max, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_LOCKED,
+    0, 0, &vm_shadow_max, "I", "");
+
+SYSCTL_INT(_vm, OID_AUTO, vm_shadow_max_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_shadow_max_enabled, 0, "");
+
+#endif /* VM_SCAN_FOR_SHADOW_CHAIN */
+
+SYSCTL_INT(_vm, OID_AUTO, vm_debug_events, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_debug_events, 0, "");
+
+__attribute__((noinline)) int __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(
+       mach_port_t task_access_port, int32_t calling_pid, uint32_t calling_gid, int32_t target_pid);
 /*
  * Sysctl's related to data/stack execution.  See osfmk/vm/vm_map.c
  */
 
-#ifndef SECURE_KERNEL
+#if DEVELOPMENT || DEBUG
 extern int allow_stack_exec, allow_data_exec;
 
 SYSCTL_INT(_vm, OID_AUTO, allow_stack_exec, CTLFLAG_RW | CTLFLAG_LOCKED, &allow_stack_exec, 0, "");
 SYSCTL_INT(_vm, OID_AUTO, allow_data_exec, CTLFLAG_RW | CTLFLAG_LOCKED, &allow_data_exec, 0, "");
-#endif /* !SECURE_KERNEL */
+
+#endif /* DEVELOPMENT || DEBUG */
 
 static const char *prot_values[] = {
        "none",
@@ -133,6 +300,13 @@ log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot)
                current_proc()->p_comm, current_proc()->p_pid, vaddr, prot_values[prot & VM_PROT_ALL]);
 }
 
+/*
+ * shared_region_unnest_logging: level of logging of unnesting events
+ * 0   - no logging
+ * 1   - throttled logging of unexpected unnesting events (default)
+ * 2   - unthrottled logging of unexpected unnesting events
+ * 3+  - unthrottled logging of all unnesting events
+ */
 int shared_region_unnest_logging = 1;
 
 SYSCTL_INT(_vm, OID_AUTO, shared_region_unnest_logging, CTLFLAG_RW | CTLFLAG_LOCKED,
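Since the OID is CTLFLAG_RW, the logging level can be inspected or raised at run time. A hedged userspace sketch using sysctlbyname(3) (illustrative only; raising the level normally requires root):

#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int level = 0;
	size_t len = sizeof(level);

	/* read the current unnest logging level (default 1) */
	if (sysctlbyname("vm.shared_region_unnest_logging", &level, &len, NULL, 0) == 0)
		printf("current level: %d\n", level);

	/* request unthrottled logging of all unnesting events */
	level = 3;
	if (sysctlbyname("vm.shared_region_unnest_logging", NULL, NULL, &level, sizeof(level)) != 0)
		perror("set shared_region_unnest_logging");
	return 0;
}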
@@ -141,34 +315,80 @@ SYSCTL_INT(_vm, OID_AUTO, shared_region_unnest_logging, CTLFLAG_RW | CTLFLAG_LOC
 int vm_shared_region_unnest_log_interval = 10;
 int shared_region_unnest_log_count_threshold = 5;
 
+/*
+ * Shared cache path enforcement.
+ */
+
+#ifndef CONFIG_EMBEDDED
+static int scdir_enforce = 1;
+static char scdir_path[] = "/var/db/dyld/";
+#else
+static int scdir_enforce = 0;
+static char scdir_path[] = "/System/Library/Caches/com.apple.dyld/";
+#endif
+
+#ifndef SECURE_KERNEL
+static int sysctl_scdir_enforce SYSCTL_HANDLER_ARGS
+{
+#if CONFIG_CSR
+       if (csr_check(CSR_ALLOW_UNRESTRICTED_FS) != 0) {
+               printf("Failed attempt to set vm.enforce_shared_cache_dir sysctl\n");
+               return EPERM;
+       }
+#endif /* CONFIG_CSR */
+       return sysctl_handle_int(oidp, arg1, arg2, req);
+}
+
+SYSCTL_PROC(_vm, OID_AUTO, enforce_shared_cache_dir, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &scdir_enforce, 0, sysctl_scdir_enforce, "I", "");
+#endif
+
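Because sysctl_scdir_enforce consults csr_check(CSR_ALLOW_UNRESTRICTED_FS), a write to this knob succeeds only when SIP's filesystem restrictions are lifted; otherwise the handler returns EPERM. A sketch of what a userspace write attempt looks like (illustrative, not part of this diff):

#include <sys/sysctl.h>
#include <errno.h>
#include <stdio.h>

int
main(void)
{
	int off = 0;

	/* with SIP fully enabled, the kernel handler above rejects the write */
	if (sysctlbyname("vm.enforce_shared_cache_dir", NULL, NULL, &off, sizeof(off)) != 0 &&
	    errno == EPERM)
		printf("write blocked by csr_check(CSR_ALLOW_UNRESTRICTED_FS)\n");
	return 0;
}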
 /* These log rate throttling state variables aren't thread safe, but
  * are sufficient unto the task.
  */
 static int64_t last_unnest_log_time = 0; 
 static int shared_region_unnest_log_count = 0;
 
-void log_unnest_badness(vm_map_t m, vm_map_offset_t s, vm_map_offset_t e) {
-       struct timeval tv;
-       const char *pcommstr;
+void
+log_unnest_badness(
+       vm_map_t        m,
+       vm_map_offset_t s,
+       vm_map_offset_t e,
+       boolean_t       is_nested_map,
+       vm_map_offset_t lowest_unnestable_addr)
+{
+       struct timeval  tv;
 
        if (shared_region_unnest_logging == 0)
                return;
 
-       if (shared_region_unnest_logging == 1) {
+       if (shared_region_unnest_logging <= 2 &&
+           is_nested_map &&
+           s >= lowest_unnestable_addr) {
+               /*
+                * Unnesting of writable map entries is fine.
+                */
+               return;
+       }
+
+       if (shared_region_unnest_logging <= 1) {
                microtime(&tv);
-               if ((tv.tv_sec - last_unnest_log_time) < vm_shared_region_unnest_log_interval) {
-                       if (shared_region_unnest_log_count++ > shared_region_unnest_log_count_threshold)
+               if ((tv.tv_sec - last_unnest_log_time) <
+                   vm_shared_region_unnest_log_interval) {
+                       if (shared_region_unnest_log_count++ >
+                           shared_region_unnest_log_count_threshold)
                                return;
-               }
-               else {
+               } else {
                        last_unnest_log_time = tv.tv_sec;
                        shared_region_unnest_log_count = 0;
                }
        }
 
-       pcommstr = current_proc()->p_comm;
-
-       printf("%s (map: %p) triggered DYLD shared region unnest for map: %p, region 0x%qx->0x%qx. While not abnormal for debuggers, this increases system memory footprint until the target exits.\n", current_proc()->p_comm, get_task_map(current_proc()->task), m, (uint64_t)s, (uint64_t)e);
+       DTRACE_VM4(log_unnest_badness,
+                  vm_map_t, m,
+                  vm_map_offset_t, s,
+                  vm_map_offset_t, e,
+                  vm_map_offset_t, lowest_unnestable_addr);
+       printf("%s[%d] triggered unnest of range 0x%qx->0x%qx of DYLD shared region in VM map %p. While not abnormal for debuggers, this increases system memory footprint until the target exits.\n", current_proc()->p_comm, current_proc()->p_pid, (uint64_t)s, (uint64_t)e, (void *) VM_KERNEL_ADDRPERM(m));
 }
 
 int
@@ -198,12 +418,12 @@ vslock(
        vm_map_t        map;
 
        map = current_map();
-       kret = vm_map_wire(map,
+       kret = vm_map_wire_kernel(map,
                           vm_map_trunc_page(addr,
                                             vm_map_page_mask(map)),
                           vm_map_round_page(addr+len,
                                             vm_map_page_mask(map)), 
-                          VM_PROT_READ | VM_PROT_WRITE,
+                          VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_BSD,
                           FALSE);
 
        switch (kret) {
@@ -437,7 +657,7 @@ pid_for_task(
        AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
        AUDIT_ARG(mach_port1, t);
 
-       t1 = port_name_to_task(t);
+       t1 = port_name_to_task_inspect(t);
 
        if (t1 == TASK_NULL) {
                err = KERN_FAILURE;
@@ -447,7 +667,10 @@ pid_for_task(
                if (p) {
                        pid  = proc_pid(p);
                        err = KERN_SUCCESS;
-               } else {
+               } else if (is_corpsetask(t1)) {
+                       pid = task_pid(t1);
+                       err = KERN_SUCCESS;
+               } else {
                        err = KERN_FAILURE;
                }
        }
@@ -544,6 +767,19 @@ out:
        return allowed;
 }
 
+/*
+ *     __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__
+ *
+ *     Description:    Waits for the user space daemon to respond to the request
+ *                     we made. Function declared non inline to be visible in
+ *                     stackshots and spindumps as well as debugging.
+ */
+__attribute__((noinline)) int __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(
+       mach_port_t task_access_port, int32_t calling_pid, uint32_t calling_gid, int32_t target_pid)
+{
+       return check_task_access(task_access_port, calling_pid, calling_gid, target_pid);
+}
+
 /*
  *     Routine:        task_for_pid
  *     Purpose:
@@ -566,8 +802,9 @@ task_for_pid(
        user_addr_t             task_addr = args->t;
        proc_t                  p = PROC_NULL;
        task_t                  t1 = TASK_NULL;
+       task_t                  task = TASK_NULL;
        mach_port_name_t        tret = MACH_PORT_NULL;
-       ipc_port_t              tfpport;
+       ipc_port_t              tfpport = MACH_PORT_NULL;
        void * sright;
        int error = 0;
 
@@ -605,52 +842,86 @@ task_for_pid(
                goto tfpout;
        }
 
-       if (p->task != TASK_NULL) {
-               /* If we aren't root and target's task access port is set... */
-               if (!kauth_cred_issuser(kauth_cred_get()) &&
-                       p != current_proc() &&
-                       (task_get_task_access_port(p->task, &tfpport) == 0) &&
-                       (tfpport != IPC_PORT_NULL)) {
+       if (p->task == TASK_NULL) {
+               error = KERN_SUCCESS;
+               goto tfpout;
+       }
 
-                       if (tfpport == IPC_PORT_DEAD) {
-                               error = KERN_PROTECTION_FAILURE;
-                               goto tfpout;
-                       }
+#if CONFIG_MACF
+       error = mac_proc_check_get_task(kauth_cred_get(), p);
+       if (error) {
+               error = KERN_FAILURE;
+               goto tfpout;
+       }
+#endif
 
-                       /* Call up to the task access server */
-                       error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);
+       /* Grab a task reference since the proc ref might be dropped if an upcall to task access server is made */
+       task = p->task;
+       task_reference(task);
 
-                       if (error != MACH_MSG_SUCCESS) {
-                               if (error == MACH_RCV_INTERRUPTED)
-                                       error = KERN_ABORTED;
-                               else
-                                       error = KERN_FAILURE;
-                               goto tfpout;
-                       }
+       /* If we aren't root and target's task access port is set... */
+       if (!kauth_cred_issuser(kauth_cred_get()) &&
+               p != current_proc() &&
+               (task_get_task_access_port(task, &tfpport) == 0) &&
+               (tfpport != IPC_PORT_NULL)) {
+
+               if (tfpport == IPC_PORT_DEAD) {
+                       error = KERN_PROTECTION_FAILURE;
+                       goto tfpout;
                }
-#if CONFIG_MACF
-               error = mac_proc_check_get_task(kauth_cred_get(), p);
-               if (error) {
-                       error = KERN_FAILURE;
+
+               /*
+                * Drop the proc_find proc ref before making an upcall
+                * to taskgated, since holding a proc_find
+                * ref while making an upcall can cause deadlock.
+                */
+               proc_rele(p);
+               p = PROC_NULL;
+
+               /* Call up to the task access server */
+               error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
+
+               if (error != MACH_MSG_SUCCESS) {
+                       if (error == MACH_RCV_INTERRUPTED)
+                               error = KERN_ABORTED;
+                       else
+                               error = KERN_FAILURE;
                        goto tfpout;
                }
-#endif
+       }
 
-               /* Grant task port access */
-               task_reference(p->task);
-               extmod_statistics_incr_task_for_pid(p->task);
+       /* Grant task port access */
+       extmod_statistics_incr_task_for_pid(task);
+       sright = (void *) convert_task_to_port(task);
+
+       /* Check if the task has been corpsified */
+       if (is_corpsetask(task)) {
+               /* task ref consumed by convert_task_to_port */
+               task = TASK_NULL;
+               ipc_port_release_send(sright);
+               error = KERN_FAILURE;
+               goto tfpout;
+       }
+
+       /* task ref consumed by convert_task_to_port */
+       task = TASK_NULL;
+       tret = ipc_port_copyout_send(
+                       sright,
+                       get_task_ipcspace(current_task()));
 
-               sright = (void *) convert_task_to_port(p->task);
-               tret = ipc_port_copyout_send(
-                               sright, 
-                               get_task_ipcspace(current_task()));
-       } 
        error = KERN_SUCCESS;
 
 tfpout:
        task_deallocate(t1);
        AUDIT_ARG(mach_port2, tret);
        (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
+
+       if (tfpport != IPC_PORT_NULL) {
+               ipc_port_release_send(tfpport);
+       }
+       if (task != TASK_NULL) {
+               task_deallocate(task);
+       }
        if (p != PROC_NULL)
                proc_rele(p);
        AUDIT_MACH_SYSCALL_EXIT(error);
@@ -778,6 +1049,7 @@ pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret)
        }
 
        target = targetproc->task;
+#ifndef CONFIG_EMBEDDED
        if (target != TASK_NULL) {
                mach_port_t tfpport;
 
@@ -793,7 +1065,7 @@ pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret)
                        }
 
                        /* Call up to the task access server */
-                       error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);
+                       error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
 
                        if (error != MACH_MSG_SUCCESS) {
                                if (error == MACH_RCV_INTERRUPTED)
@@ -804,6 +1076,7 @@ pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret)
                        }
                }
        }
+#endif
 
        task_reference(target);
        error = task_pidsuspend(target);
@@ -862,6 +1135,7 @@ pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret)
        }
 
        target = targetproc->task;
+#ifndef CONFIG_EMBEDDED
        if (target != TASK_NULL) {
                mach_port_t tfpport;
 
@@ -877,7 +1151,7 @@ pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret)
                        }
 
                        /* Call up to the task access server */
-                       error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);
+                       error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
 
                        if (error != MACH_MSG_SUCCESS) {
                                if (error == MACH_RCV_INTERRUPTED)
@@ -888,6 +1162,13 @@ pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret)
                        }
                }
        }
+#endif
+
+#if CONFIG_EMBEDDED
+#if SOCKETS
+       resume_proc_sockets(targetproc);
+#endif /* SOCKETS */
+#endif /* CONFIG_EMBEDDED */
 
        task_reference(target);
 
@@ -918,6 +1199,202 @@ out:
        return error;
 }
 
+#if CONFIG_EMBEDDED
+/*
+ * Freeze the specified process (provided in args->pid), or find and freeze a PID.
+ * When a process is specified, this call is blocking, otherwise we wake up the
+ * freezer thread and do not block on a process being frozen.
+ */
+kern_return_t
+pid_hibernate(struct proc *p __unused, struct pid_hibernate_args *args, int *ret)
+{
+       int     error = 0;
+       proc_t  targetproc = PROC_NULL;
+       int     pid = args->pid;
+
+#ifndef CONFIG_FREEZE
+       #pragma unused(pid)
+#else
+
+#if CONFIG_MACF
+       error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_HIBERNATE);
+       if (error) {
+               error = EPERM;
+               goto out;
+       }
+#endif
+
+       /*
+        * If a pid has been provided, we obtain the process handle and call task_for_pid_posix_check().
+        */
+
+       if (pid >= 0) {
+               targetproc = proc_find(pid);
+
+               if (targetproc == PROC_NULL) {
+                       error = ESRCH;
+                       goto out;
+               }
+
+               if (!task_for_pid_posix_check(targetproc)) {
+                       error = EPERM;
+                       goto out;
+               }
+       }
+
+       if (pid == -2) {
+               vm_pageout_anonymous_pages();
+       } else if (pid == -1) {
+               memorystatus_on_inactivity(targetproc);
+       } else {
+               error = memorystatus_freeze_process_sync(targetproc);
+       }
+
+out:
+
+#endif /* CONFIG_FREEZE */
+
+       if (targetproc != PROC_NULL)
+               proc_rele(targetproc);
+       *ret = error;
+       return error;
+}
+#endif /* CONFIG_EMBEDDED */
+
+#if SOCKETS
+int
+networking_memstatus_callout(proc_t p, uint32_t status)
+{
+       struct filedesc *fdp;
+       int i;
+
+       /*
+        * proc list lock NOT held
+        * proc lock NOT held
+        * a reference on the proc has been held / shall be dropped by the caller.
+        */
+       LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
+       LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_NOTOWNED);
+
+       proc_fdlock(p);
+       fdp = p->p_fd;
+       for (i = 0; i < fdp->fd_nfiles; i++) {
+               struct fileproc *fp;
+
+               fp = fdp->fd_ofiles[i];
+               if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
+                       continue;
+               }
+               switch (FILEGLOB_DTYPE(fp->f_fglob)) {
+#if NECP
+               case DTYPE_NETPOLICY:
+                       necp_fd_memstatus(p, status,
+                           (struct necp_fd_data *)fp->f_fglob->fg_data);
+                       break;
+#endif /* NECP */
+               default:
+                       break;
+               }
+       }
+       proc_fdunlock(p);
+
+       return (1);
+}
+
+
+static int
+networking_defunct_callout(proc_t p, void *arg)
+{
+       struct pid_shutdown_sockets_args *args = arg;
+       int pid = args->pid;
+       int level = args->level;
+       struct filedesc *fdp;
+       int i;
+
+       proc_fdlock(p);
+       fdp = p->p_fd;
+       for (i = 0; i < fdp->fd_nfiles; i++) {
+               struct fileproc *fp = fdp->fd_ofiles[i];
+               struct fileglob *fg;
+
+               if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
+                       continue;
+               }
+
+               fg = fp->f_fglob;
+               switch (FILEGLOB_DTYPE(fg)) {
+               case DTYPE_SOCKET: {
+                       struct socket *so = (struct socket *)fg->fg_data;
+                       if (p->p_pid == pid || so->last_pid == pid || 
+                           ((so->so_flags & SOF_DELEGATED) && so->e_pid == pid)) {
+                               /* Call networking stack with socket and level */
+                               (void) socket_defunct(p, so, level);
+                       }
+                       break;
+               }
+#if NECP
+               case DTYPE_NETPOLICY:
+                       /* first pass: defunct necp and get stats for ntstat */
+                       if (p->p_pid == pid) {
+                               necp_fd_defunct(p,
+                                   (struct necp_fd_data *)fg->fg_data);
+                       }
+                       break;
+#endif /* NECP */
+               default:
+                       break;
+               }
+       }
+
+       proc_fdunlock(p);
+
+       return (PROC_RETURNED);
+}
+
+int
+pid_shutdown_sockets(struct proc *p __unused, struct pid_shutdown_sockets_args *args, int *ret)
+{
+       int                             error = 0;
+       proc_t                          targetproc = PROC_NULL;
+       int                             pid = args->pid;
+       int                             level = args->level;
+
+       if (level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC &&
+           level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL) {
+               error = EINVAL;
+               goto out;
+       }
+
+#if CONFIG_MACF
+       error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_SHUTDOWN_SOCKETS);
+       if (error) {
+               error = EPERM;
+               goto out;
+       }
+#endif
+
+       targetproc = proc_find(pid);
+       if (targetproc == PROC_NULL) {
+               error = ESRCH;
+               goto out;
+       }
+
+       if (!task_for_pid_posix_check(targetproc)) {
+               error = EPERM;
+               goto out;
+       }
+
+       proc_iterate(PROC_ALLPROCLIST | PROC_NOWAITTRANS,
+           networking_defunct_callout, args, NULL, NULL);
+
+out:
+       if (targetproc != PROC_NULL)
+               proc_rele(targetproc);
+       *ret = error;
+       return error;
+}
+
+#endif /* SOCKETS */
 
 static int
 sysctl_settfp_policy(__unused struct sysctl_oid *oidp, void *arg1,
@@ -1004,7 +1481,8 @@ shared_region_check_np(
 
        SHARED_REGION_TRACE_DEBUG(
                ("shared_region: %p [%d(%s)] -> check_np(0x%llx)\n",
-                current_thread(), p->p_pid, p->p_comm,
+                (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                p->p_pid, p->p_comm,
                 (uint64_t)uap->start_address));
 
        /* retrieve the current task's shared region */
@@ -1025,7 +1503,8 @@ shared_region_check_np(
                                        ("shared_region: %p [%d(%s)] "
                                         "check_np(0x%llx) "
                                         "copyout(0x%llx) error %d\n",
                                        ("shared_region: %p [%d(%s)] "
                                         "check_np(0x%llx) "
                                         "copyout(0x%llx) error %d\n",
-                                        current_thread(), p->p_pid, p->p_comm,
+                                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                                        p->p_pid, p->p_comm,
                                         (uint64_t)uap->start_address, (uint64_t)start_address,
                                         error));
                        }
@@ -1038,7 +1517,8 @@ shared_region_check_np(
 
        SHARED_REGION_TRACE_DEBUG(
                ("shared_region: %p [%d(%s)] check_np(0x%llx) <- 0x%llx %d\n",
-                current_thread(), p->p_pid, p->p_comm,
+                (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                p->p_pid, p->p_comm,
                 (uint64_t)uap->start_address, (uint64_t)start_address, error));
 
        return error;
@@ -1064,7 +1544,8 @@ shared_region_copyin_mappings(
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map(): "
                         "copyin(0x%llx, %d) failed (error=%d)\n",
-                        current_thread(), p->p_pid, p->p_comm,
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm,
                         (uint64_t)user_mappings, mappings_count, error));
        }
        return error;
@@ -1092,7 +1573,7 @@ _shared_region_map_and_slide(
        int                             error;
        kern_return_t                   kr;
        struct fileproc                 *fp;
-       struct vnode                    *vp, *root_vp;
+       struct vnode                    *vp, *root_vp, *scdir_vp;
        struct vnode_attr               va;
        off_t                           fs;
        memory_object_size_t            file_size;
@@ -1101,14 +1582,17 @@ _shared_region_map_and_slide(
 #endif
        memory_object_control_t         file_control;
        struct vm_shared_region         *shared_region;
+       uint32_t                        i;
 
        SHARED_REGION_TRACE_DEBUG(
                ("shared_region: %p [%d(%s)] -> map\n",
-                current_thread(), p->p_pid, p->p_comm));
+                (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                p->p_pid, p->p_comm));
 
        shared_region = NULL;
        fp = NULL;
        vp = NULL;
+       scdir_vp = NULL;
 
        /* get file structure from file descriptor */
        error = fp_lookup(p, fd, &fp, 0);
@@ -1116,7 +1600,8 @@ _shared_region_map_and_slide(
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map: "
                         "fd=%d lookup failed (error=%d)\n",
-                        current_thread(), p->p_pid, p->p_comm, fd, error));
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm, fd, error));
                goto done;
        }
 
@@ -1125,7 +1610,8 @@ _shared_region_map_and_slide(
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map: "
                         "fd=%d not a vnode (type=%d)\n",
-                        current_thread(), p->p_pid, p->p_comm,
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm,
                         fd, FILEGLOB_DTYPE(fp->f_fglob)));
                error = EINVAL;
                goto done;
@@ -1136,7 +1622,8 @@ _shared_region_map_and_slide(
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map: "
                         "fd=%d not readable\n",
-                        current_thread(), p->p_pid, p->p_comm, fd));
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm, fd));
                error = EPERM;
                goto done;
        }
@@ -1147,7 +1634,8 @@ _shared_region_map_and_slide(
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map: "
                         "fd=%d getwithref failed (error=%d)\n",
-                        current_thread(), p->p_pid, p->p_comm, fd, error));
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm, fd, error));
                goto done;
        }
        vp = (struct vnode *) fp->f_fglob->fg_data;
@@ -1157,30 +1645,24 @@ _shared_region_map_and_slide(
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map(%p:'%s'): "
                         "not a file (type=%d)\n",
-                        current_thread(), p->p_pid, p->p_comm,
-                        vp, vp->v_name, vp->v_type));
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm,
+                        (void *)VM_KERNEL_ADDRPERM(vp),
+                        vp->v_name, vp->v_type));
                error = EINVAL;
                goto done;
        }
 
 #if CONFIG_MACF
+       /* pass in 0 for the offset argument because AMFI does not need the offset
+               of the shared cache */
        error = mac_file_check_mmap(vfs_context_ucred(vfs_context_current()),
-                       fp->f_fglob, VM_PROT_ALL, MAP_FILE, &maxprot);
+                       fp->f_fglob, VM_PROT_ALL, MAP_FILE, 0, &maxprot);
        if (error) {
                goto done;
        }
 #endif /* MAC */
 
-#if CONFIG_PROTECT
-       /* check for content protection access */
-       {
-               error = cp_handle_vnop(vp, CP_READ_ACCESS | CP_WRITE_ACCESS, 0);
-               if (error) { 
-                       goto done;
-               }
-       }
-#endif /* CONFIG_PROTECT */
-
        /* make sure vnode is on the process's root volume */
        root_vp = p->p_fd->fd_rdir;
        if (root_vp == NULL) {
@@ -1197,8 +1679,9 @@ _shared_region_map_and_slide(
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map(%p:'%s'): "
                         "not on process's root volume\n",
-                        current_thread(), p->p_pid, p->p_comm,
-                        vp, vp->v_name));
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm,
+                        (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name));
                error = EPERM;
                goto done;
        }
@@ -1211,28 +1694,62 @@ _shared_region_map_and_slide(
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map(%p:'%s'): "
                         "vnode_getattr(%p) failed (error=%d)\n",
-                        current_thread(), p->p_pid, p->p_comm,
-                        vp, vp->v_name, vp, error));
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm,
+                        (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
+                        (void *)VM_KERNEL_ADDRPERM(vp), error));
                goto done;
        }
        if (va.va_uid != 0) {
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map(%p:'%s'): "
                         "owned by uid=%d instead of 0\n",
-                        current_thread(), p->p_pid, p->p_comm,
-                        vp, vp->v_name, va.va_uid));
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm,
+                        (void *)VM_KERNEL_ADDRPERM(vp),
+                        vp->v_name, va.va_uid));
                error = EPERM;
                goto done;
        }
 
+       if (scdir_enforce) {
+               /* get vnode for scdir_path */
+               error = vnode_lookup(scdir_path, 0, &scdir_vp, vfs_context_current());
+               if (error) {
+                       SHARED_REGION_TRACE_ERROR(
+                               ("shared_region: %p [%d(%s)] map(%p:'%s'): "
+                                "vnode_lookup(%s) failed (error=%d)\n",
+                                (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                                p->p_pid, p->p_comm,
+                                (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
+                                scdir_path, error));
+                       goto done;
+               }
+
+               /* ensure parent is scdir_vp */
+               if (vnode_parent(vp) != scdir_vp) {
+                       SHARED_REGION_TRACE_ERROR(
+                               ("shared_region: %p [%d(%s)] map(%p:'%s'): "
+                                "shared cache file not in %s\n",
+                                (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                                p->p_pid, p->p_comm,
+                                (void *)VM_KERNEL_ADDRPERM(vp),
+                                vp->v_name, scdir_path));
+                       error = EPERM;
+                       goto done;
+               }
+       }
+
        /* get vnode size */
        error = vnode_size(vp, &fs, vfs_context_current());
        if (error) {
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map(%p:'%s'): "
                         "vnode_size(%p) failed (error=%d)\n",
-                        current_thread(), p->p_pid, p->p_comm,
-                        vp, vp->v_name, vp, error));
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm,
+                        (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
+                        (void *)VM_KERNEL_ADDRPERM(vp), error));
                goto done;
        }
        file_size = fs;
@@ -1243,21 +1760,54 @@ _shared_region_map_and_slide(
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map(%p:'%s'): "
                         "no memory object\n",
-                        current_thread(), p->p_pid, p->p_comm,
-                        vp, vp->v_name));
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm,
+                        (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name));
                error = EINVAL;
                goto done;
        }
 
+       /* check that the mappings are properly covered by code signatures */
+       if (!cs_system_enforcement()) {
+               /* code signing is not enforced: no need to check */
+       } else for (i = 0; i < mappings_count; i++) {
+               if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
+                       /* zero-filled mapping: not backed by the file */
+                       continue;
+               }
+               if (ubc_cs_is_range_codesigned(vp,
+                                              mappings[i].sfm_file_offset,
+                                              mappings[i].sfm_size)) {
+                       /* this mapping is fully covered by code signatures */
+                       continue;
+               }
+               SHARED_REGION_TRACE_ERROR(
+                       ("shared_region: %p [%d(%s)] map(%p:'%s'): "
+                        "mapping #%d/%d [0x%llx:0x%llx:0x%llx:0x%x:0x%x] "
+                        "is not code-signed\n",
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm,
+                        (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
+                        i, mappings_count,
+                        mappings[i].sfm_address,
+                        mappings[i].sfm_size,
+                        mappings[i].sfm_file_offset,
+                        mappings[i].sfm_max_prot,
+                        mappings[i].sfm_init_prot));
+               error = EINVAL;
+               goto done;
+       }
 
        /* get the process's shared region (setup in vm_map_exec()) */
-       shared_region = vm_shared_region_get(current_task());
+       shared_region = vm_shared_region_trim_and_get(current_task());
        if (shared_region == NULL) {
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map(%p:'%s'): "
                         "no shared region\n",
-                        current_thread(), p->p_pid, p->p_comm,
-                        vp, vp->v_name));
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm,
+                        (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name));
+               error = EINVAL;
                goto done;
        }
 
@@ -1275,8 +1825,9 @@ _shared_region_map_and_slide(
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map(%p:'%s'): "
                         "vm_shared_region_map_file() failed kr=0x%x\n",
-                        current_thread(), p->p_pid, p->p_comm,
-                        vp, vp->v_name, kr));
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm,
+                        (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, kr));
                switch (kr) {
                case KERN_INVALID_ADDRESS:
                        error = EFAULT;
@@ -1331,6 +1882,10 @@ done:
                fp_drop(p, fd, fp, 0);
                fp = NULL;
        }
+       if (scdir_vp != NULL) {
+               (void)vnode_put(scdir_vp);
+               scdir_vp = NULL;
+       }
 
        if (shared_region != NULL) {
                vm_shared_region_deallocate(shared_region);
@@ -1338,7 +1893,8 @@ done:
 
        SHARED_REGION_TRACE_DEBUG(
                ("shared_region: %p [%d(%s)] <- map\n",
-                current_thread(), p->p_pid, p->p_comm));
+                (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                p->p_pid, p->p_comm));
 
        return error;
 }
@@ -1379,7 +1935,8 @@ shared_region_map_and_slide_np(
                SHARED_REGION_TRACE_INFO(
                        ("shared_region: %p [%d(%s)] map(): "
                         "no mappings\n",
-                        current_thread(), p->p_pid, p->p_comm));
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm));
                kr = 0; /* no mappings: we're done ! */
                goto done;
        } else if (mappings_count <= SFM_MAX_STACK) {
@@ -1388,7 +1945,8 @@ shared_region_map_and_slide_np(
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map(): "
                         "too many mappings (%d)\n",
-                        current_thread(), p->p_pid, p->p_comm,
+                        (void *)VM_KERNEL_ADDRPERM(current_thread()),
+                        p->p_pid, p->p_comm,
                         mappings_count));
                kr = KERN_FAILURE;
                goto done;
@@ -1412,6 +1970,9 @@ done:
 
 /* sysctl overflow room */
 
+SYSCTL_INT (_vm, OID_AUTO, pagesize, CTLFLAG_RD | CTLFLAG_LOCKED,
+           (int *) &page_size, 0, "vm page size");
+
 /* vm_page_free_target is provided as a makeshift solution for applications that want to
        allocate buffer space, possibly purgeable memory, but not cause inactive pages to be
        reclaimed. It allows the app to calculate how much memory is free outside the free target. */
@@ -1419,9 +1980,8 @@ extern unsigned int       vm_page_free_target;
 SYSCTL_INT(_vm, OID_AUTO, vm_page_free_target, CTLFLAG_RD | CTLFLAG_LOCKED, 
                   &vm_page_free_target, 0, "Pageout daemon free target");
 
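As the comment above notes, an application can combine this value with the free page count to estimate how many pages it may take before the pageout daemon starts reclaiming. A sketch, assuming vm.page_free_count is exported as elsewhere in this sysctl family (illustrative only):

#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	unsigned int free_count = 0, free_target = 0;
	size_t len = sizeof(free_count);

	sysctlbyname("vm.page_free_count", &free_count, &len, NULL, 0);
	len = sizeof(free_target);
	sysctlbyname("vm.vm_page_free_target", &free_target, &len, NULL, 0);

	/* pages available before the pageout daemon begins reclaiming */
	printf("free above target: %ld pages\n",
	    (long)free_count - (long)free_target);
	return 0;
}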
-extern unsigned int    vm_memory_pressure;
 SYSCTL_INT(_vm, OID_AUTO, memory_pressure, CTLFLAG_RD | CTLFLAG_LOCKED,
-          &vm_memory_pressure, 0, "Memory pressure indicator");
+          &vm_pageout_state.vm_memory_pressure, 0, "Memory pressure indicator");
 
 static int
 vm_ctl_page_free_wanted SYSCTL_HANDLER_ARGS
@@ -1444,6 +2004,43 @@ extern unsigned int      vm_page_purgeable_wired_count;
 SYSCTL_INT(_vm, OID_AUTO, page_purgeable_wired_count, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_purgeable_wired_count, 0, "Wired purgeable page count");
 
+#if DEVELOPMENT || DEBUG
+extern uint64_t get_pages_grabbed_count(void);
+
+static int
+pages_grabbed SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+       uint64_t value = get_pages_grabbed_count();
+       return SYSCTL_OUT(req, &value, sizeof(value));
+}
+
+SYSCTL_PROC(_vm, OID_AUTO, pages_grabbed, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
+           0, 0, &pages_grabbed, "QU", "Total pages grabbed");
+SYSCTL_ULONG(_vm, OID_AUTO, pages_freed, CTLFLAG_RD | CTLFLAG_LOCKED,
+            &vm_pageout_vminfo.vm_page_pages_freed, "Total pages freed");
+
+SYSCTL_INT(_vm, OID_AUTO, pageout_purged_objects, CTLFLAG_RD | CTLFLAG_LOCKED,
+          &vm_pageout_debug.vm_pageout_purged_objects, 0, "System purged object count");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_busy, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_busy, 0, "Cleaned pages busy (deactivated)");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_nolock, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_nolock, 0, "Cleaned pages no-lock (deactivated)");
+
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_volatile_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_volatile_reactivated, 0, "Cleaned pages volatile reactivated");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_fault_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_fault_reactivated, 0, "Cleaned pages fault reactivated");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_reactivated, 0, "Cleaned pages reactivated"); /* sum of all reactivated AND busy and nolock (even though those actually get re-DE-activated) */
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_vminfo.vm_pageout_freed_cleaned, "Cleaned pages freed");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reference_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_cleaned_reference_reactivated, 0, "Cleaned pages reference reactivated");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED,
+           &vm_pageout_debug.vm_pageout_enqueued_cleaned, 0, "");
+#endif
+
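A hedged usage sketch for the CTLTYPE_QUAD node above (illustrative, not part of the commit): the value is 64 bits wide, and the node exists only on DEVELOPMENT/DEBUG kernels, so ENOENT is the expected failure on RELEASE builds:

        #include <sys/types.h>
        #include <sys/sysctl.h>
        #include <stdint.h>

        /* Sketch: read the 64-bit vm.pages_grabbed counter; fails with
         * ENOENT on RELEASE kernels, where the node is compiled out. */
        static int
        read_pages_grabbed(uint64_t *out)
        {
                size_t len = sizeof(*out);
                return sysctlbyname("vm.pages_grabbed", out, &len, NULL, 0);
        }
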
 extern int madvise_free_debug;
 SYSCTL_INT(_vm, OID_AUTO, madvise_free_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
           &madvise_free_debug, 0, "zero-fill on madvise(MADV_FREE*)");
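
The knob above is easiest to see from userland: with vm.madvise_free_debug set to 1, pages hit by madvise(MADV_FREE*) are zero-filled immediately rather than reclaimed lazily, so any buggy reuse of freed contents reads deterministic zeroes. A minimal sketch (illustrative; assumes a test harness has already enabled the sysctl):

        #include <sys/mman.h>
        #include <string.h>
        #include <assert.h>

        /* Sketch: with the debug knob on, the contents are discarded
         * (zeroed) at the madvise() call itself, making stale reads
         * reproducible instead of timing-dependent. */
        static void
        madvise_free_demo(void)
        {
                size_t sz = 16 * 4096;
                char *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                    MAP_ANON | MAP_PRIVATE, -1, 0);
                assert(p != MAP_FAILED);
                memset(p, 0xAB, sz);
                madvise(p, sz, MADV_FREE);
                /* With vm.madvise_free_debug=1, p[0] reads back as 0 here. */
                munmap(p, sz);
        }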
@@ -1454,7 +2051,7 @@ SYSCTL_QUAD(_vm, OID_AUTO, reusable_success, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_stats_reusable.reusable_pages_success, "");
 SYSCTL_QUAD(_vm, OID_AUTO, reusable_failure, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_stats_reusable.reusable_pages_failure, "");
-SYSCTL_QUAD(_vm, OID_AUTO, reusable_shared, CTLFLAG_RD | CTLFLAG_LOCKED,
+SYSCTL_QUAD(_vm, OID_AUTO, reusable_pages_shared, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_stats_reusable.reusable_pages_shared, "");
 SYSCTL_QUAD(_vm, OID_AUTO, all_reusable_calls, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_stats_reusable.all_reusable_calls, "");
@@ -1474,6 +2071,12 @@ SYSCTL_QUAD(_vm, OID_AUTO, can_reuse_failure, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_stats_reusable.can_reuse_failure, "");
 SYSCTL_QUAD(_vm, OID_AUTO, reusable_reclaimed, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_stats_reusable.reusable_reclaimed, "");
+SYSCTL_QUAD(_vm, OID_AUTO, reusable_nonwritable, CTLFLAG_RD | CTLFLAG_LOCKED,
+          &vm_page_stats_reusable.reusable_nonwritable, "");
+SYSCTL_QUAD(_vm, OID_AUTO, reusable_shared, CTLFLAG_RD | CTLFLAG_LOCKED,
+          &vm_page_stats_reusable.reusable_shared, "");
+SYSCTL_QUAD(_vm, OID_AUTO, free_shared, CTLFLAG_RD | CTLFLAG_LOCKED,
+          &vm_page_stats_reusable.free_shared, "");
 
 
 extern unsigned int vm_page_free_count, vm_page_speculative_count;
@@ -1483,36 +2086,79 @@ SYSCTL_UINT(_vm, OID_AUTO, page_speculative_count, CTLFLAG_RD | CTLFLAG_LOCKED,
 extern unsigned int vm_page_cleaned_count;
 SYSCTL_UINT(_vm, OID_AUTO, page_cleaned_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_cleaned_count, 0, "Cleaned queue size");
 
+extern unsigned int vm_page_pageable_internal_count, vm_page_pageable_external_count;
+SYSCTL_UINT(_vm, OID_AUTO, page_pageable_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pageable_internal_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_pageable_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pageable_external_count, 0, "");
+
 /* pageout counts */
-extern unsigned int vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external, vm_pageout_inactive_clean, vm_pageout_speculative_clean, vm_pageout_inactive_used;
-extern unsigned int vm_pageout_freed_from_inactive_clean, vm_pageout_freed_from_speculative;
-SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_dirty_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_dirty_internal, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_dirty_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_dirty_external, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_clean, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_speculative_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_speculative_clean, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_used, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_used, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_inactive_clean, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_speculative, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_speculative, 0, "");
-
-extern unsigned int vm_pageout_freed_from_cleaned;
-SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_cleaned, 0, "");
-
-/* counts of pages entering the cleaned queue */
-extern unsigned int vm_pageout_enqueued_cleaned, vm_pageout_enqueued_cleaned_from_inactive_clean, vm_pageout_enqueued_cleaned_from_inactive_dirty;
-SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned, 0, ""); /* sum of next two */
-SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned_from_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned_from_inactive_clean, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned_from_inactive_dirty, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned_from_inactive_dirty, 0, "");
-
-/* counts of pages leaving the cleaned queue */
-extern unsigned int vm_pageout_cleaned_reclaimed, vm_pageout_cleaned_reactivated, vm_pageout_cleaned_reference_reactivated, vm_pageout_cleaned_volatile_reactivated, vm_pageout_cleaned_fault_reactivated, vm_pageout_cleaned_commit_reactivated, vm_pageout_cleaned_busy, vm_pageout_cleaned_nolock;
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_reclaimed, 0, "Cleaned pages reclaimed");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_reactivated, 0, "Cleaned pages reactivated"); /* sum of all reactivated AND busy and nolock (even though those actually get reDEactivated */
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reference_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_reference_reactivated, 0, "Cleaned pages reference reactivated");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_volatile_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_volatile_reactivated, 0, "Cleaned pages volatile reactivated");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_fault_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_fault_reactivated, 0, "Cleaned pages fault reactivated");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_commit_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_commit_reactivated, 0, "Cleaned pages commit reactivated");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_busy, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_busy, 0, "Cleaned pages busy (deactivated)");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_nolock, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_nolock, 0, "Cleaned pages no-lock (deactivated)");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_pageout_inactive_clean, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_used, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_pageout_inactive_used, 0, "");
+
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_inactive_dirty_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_inactive_dirty_internal, "");
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_inactive_dirty_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_inactive_dirty_external, "");
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_speculative_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_freed_speculative, "");
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_freed_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_freed_external, "");
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_freed_speculative, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_freed_speculative, "");
+SYSCTL_ULONG(_vm, OID_AUTO, pageout_freed_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_freed_cleaned, "");
+
+
+/* counts of pages prefaulted when entering a memory object */
+extern int64_t vm_prefault_nb_pages, vm_prefault_nb_bailout;
+SYSCTL_QUAD(_vm, OID_AUTO, prefault_nb_pages, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_prefault_nb_pages, "");
+SYSCTL_QUAD(_vm, OID_AUTO, prefault_nb_bailout, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_prefault_nb_bailout, "");
+
+#if defined (__x86_64__)
+extern unsigned int vm_clump_promote_threshold;
+SYSCTL_UINT(_vm, OID_AUTO, vm_clump_promote_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_clump_promote_threshold, 0, "clump size threshold for promotes");
+#if DEVELOPMENT || DEBUG
+extern unsigned long vm_clump_stats[];
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats1, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[1], "free page allocations from clump of 1 page");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats2, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[2], "free page allocations from clump of 2 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats3, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[3], "free page allocations from clump of 3 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats4, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[4], "free page allocations from clump of 4 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats5, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[5], "free page allocations from clump of 5 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats6, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[6], "free page allocations from clump of 6 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats7, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[7], "free page allocations from clump of 7 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats8, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[8], "free page allocations from clump of 8 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats9, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[9], "free page allocations from clump of 9 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats10, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[10], "free page allocations from clump of 10 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats11, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[11], "free page allocations from clump of 11 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats12, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[12], "free page allocations from clump of 12 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats13, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[13], "free page allocations from clump of 13 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats14, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[14], "free page allocations from clump of 14 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats15, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[15], "free page allocations from clump of 15 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats16, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[16], "free page allocations from clump of 16 pages");
+extern unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_alloc, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_allocs, "free page allocations");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_inserts, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_inserts, "free page insertions");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_inrange, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_inrange, "free page insertions that are part of vm_pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_promotes, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_promotes, "pages promoted to head");
+#endif  /* if DEVELOPMENT || DEBUG */
+#endif  /* #if defined (__x86_64__) */
+
+#if CONFIG_SECLUDED_MEMORY
+
+SYSCTL_UINT(_vm, OID_AUTO, num_tasks_can_use_secluded_mem, CTLFLAG_RD | CTLFLAG_LOCKED, &num_tasks_can_use_secluded_mem, 0, "");
+extern unsigned int vm_page_secluded_target;
+extern unsigned int vm_page_secluded_count;
+extern unsigned int vm_page_secluded_count_free;
+extern unsigned int vm_page_secluded_count_inuse;
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_target, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_target, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_count_free, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_count_free, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_count_inuse, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_count_inuse, 0, "");
+
+extern struct vm_page_secluded_data vm_page_secluded;
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_eligible, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.eligible_for_secluded, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_success_free, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_success_free, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_success_other, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_success_other, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_failure_locked, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_failure_locked, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_failure_state, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_failure_state, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_failure_dirty, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_failure_dirty, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_for_iokit, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_for_iokit, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_for_iokit_success, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_for_iokit_success, 0, "");
+
+#endif /* CONFIG_SECLUDED_MEMORY */
 
 #include <kern/thread.h>
 #include <sys/user.h>
@@ -1646,3 +2292,105 @@ kas_info(struct proc *p,
        return 0;
 #endif /* !SECURE_KERNEL */
 }
+
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wcast-qual"
+#pragma clang diagnostic ignored "-Wunused-function"
+
+static void asserts(void) {
+       static_assert(sizeof(vm_min_kernel_address) == sizeof(unsigned long));
+       static_assert(sizeof(vm_max_kernel_address) == sizeof(unsigned long));
+}
+
+SYSCTL_ULONG(_vm, OID_AUTO, vm_min_kernel_address, CTLFLAG_RD, (unsigned long *) &vm_min_kernel_address, "");
+SYSCTL_ULONG(_vm, OID_AUTO, vm_max_kernel_address, CTLFLAG_RD, (unsigned long *) &vm_max_kernel_address, "");
+#pragma clang diagnostic pop
+
+extern uint32_t vm_page_pages;
+SYSCTL_UINT(_vm, OID_AUTO, pages, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pages, 0, "");
+
+extern uint32_t vm_page_busy_absent_skipped;
+SYSCTL_UINT(_vm, OID_AUTO, page_busy_absent_skipped, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_busy_absent_skipped, 0, "");
+
+#if (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG)
+extern int vm_footprint_suspend_allowed;
+SYSCTL_INT(_vm, OID_AUTO, footprint_suspend_allowed, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_footprint_suspend_allowed, 0, "");
+
+extern void pmap_footprint_suspend(vm_map_t map, boolean_t suspend);
+static int
+sysctl_vm_footprint_suspend SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+       int error = 0;
+       int new_value;
+
+       if (req->newptr == USER_ADDR_NULL) {
+               return 0;
+       }
+       error = SYSCTL_IN(req, &new_value, sizeof(int));
+       if (error) {
+               return error;
+       }
+       if (!vm_footprint_suspend_allowed) {
+               if (new_value != 0) {
+                       /* suspends are not allowed... */
+                       return 0;
+               }
+               /* ... but let resumes proceed */
+       }
+       DTRACE_VM2(footprint_suspend,
+                  vm_map_t, current_map(),
+                  int, new_value);
+
+       pmap_footprint_suspend(current_map(), new_value);
+
+       return 0;
+}
+SYSCTL_PROC(_vm, OID_AUTO, footprint_suspend,
+           CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_LOCKED|CTLFLAG_MASKED,
+           0, 0, &sysctl_vm_footprint_suspend, "I", "");
+#endif /* (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG) */
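
A hedged write-side sketch for the handler above (illustrative, not part of the commit): the node is write-only and masked, CTLFLAG_ANYBODY lets an unprivileged test toggle its own footprint accounting, a nonzero value suspends (when vm_footprint_suspend_allowed permits) and zero resumes:

        #include <sys/types.h>
        #include <sys/sysctl.h>

        /* Sketch: suspend/resume footprint accounting for the calling
         * process on arm DEVELOPMENT/DEBUG kernels. */
        static int
        set_footprint_suspended(int suspend)
        {
                int v = suspend ? 1 : 0;
                return sysctlbyname("vm.footprint_suspend",
                    NULL, NULL, &v, sizeof(v));
        }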
+
+extern uint64_t vm_map_corpse_footprint_count;
+extern uint64_t vm_map_corpse_footprint_size_avg;
+extern uint64_t vm_map_corpse_footprint_size_max;
+extern uint64_t vm_map_corpse_footprint_full;
+extern uint64_t vm_map_corpse_footprint_no_buf;
+SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_count,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_count, "");
+SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_size_avg,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_size_avg, "");
+SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_size_max,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_size_max, "");
+SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_full,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_full, "");
+SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_no_buf,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_no_buf, "");
+
+#if PMAP_CS
+extern uint64_t vm_cs_defer_to_pmap_cs;
+extern uint64_t vm_cs_defer_to_pmap_cs_not;
+SYSCTL_QUAD(_vm, OID_AUTO, cs_defer_to_pmap_cs,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs, "");
+SYSCTL_QUAD(_vm, OID_AUTO, cs_defer_to_pmap_cs_not,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs_not, "");
+#endif /* PMAP_CS */
+
+extern uint64_t shared_region_pager_copied;
+extern uint64_t shared_region_pager_slid;
+extern uint64_t shared_region_pager_slid_error;
+extern uint64_t shared_region_pager_reclaimed;
+SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_copied,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_copied, "");
+SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_slid,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_slid, "");
+SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_slid_error,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_slid_error, "");
+SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_reclaimed,
+           CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_reclaimed, "");
+
+#if MACH_ASSERT
+extern int pmap_ledgers_panic_leeway;
+SYSCTL_INT(_vm, OID_AUTO, pmap_ledgers_panic_leeway, CTLFLAG_RW | CTLFLAG_LOCKED, &pmap_ledgers_panic_leeway, 0, "");
+#endif /* MACH_ASSERT */