#include <meta_features.h>
+#include <vm/vm_options.h>
+
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
-#include <kern/lock.h>
#include <kern/extmod_statistics.h>
#include <mach/mach_traps.h>
#include <mach/port.h>
#include <sys/cprotect.h>
#include <sys/kpi_socket.h>
#include <sys/kas_info.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
#include <security/audit/audit.h>
#include <security/mac.h>
int _shared_region_map_and_slide(struct proc*, int, unsigned int, struct shared_file_mapping_np*, uint32_t, user_addr_t, user_addr_t);
int shared_region_copyin_mappings(struct proc*, user_addr_t, unsigned int, struct shared_file_mapping_np *);
-SYSCTL_INT(_vm, OID_AUTO, vm_debug_events, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_debug_events, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_compressor, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_compressor, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_compressor_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_compressor_pages, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_terminate, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_terminate, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_terminate_failure, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_terminate_failure, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_should_cow_but_wired, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.should_cow_but_wired, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_extra_cow, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_extra_cow, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_extra_cow_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_extra_cow_pages, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_lookup_failure_write, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_lookup_failure_write, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_lookup_failure_copy, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_lookup_failure_copy, 0, "");
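+
+/*
+ * A minimal user-space sketch of sampling one of these read-only
+ * counters via sysctlbyname(3); the counter name assumes a kernel
+ * built with the sysctls above.
+ *
+ *	#include <sys/sysctl.h>
+ *	#include <stdio.h>
+ *
+ *	int
+ *	main(void)
+ *	{
+ *		int collapses = 0;
+ *		size_t len = sizeof(collapses);
+ *
+ *		if (sysctlbyname("vm.vm_do_collapse_compressor",
+ *		    &collapses, &len, NULL, 0) == 0)
+ *			printf("object collapses via compressor: %d\n",
+ *			    collapses);
+ *		return 0;
+ *	}
+ */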
+#if VM_SCAN_FOR_SHADOW_CHAIN
+static int vm_shadow_max_enabled = 0; /* Disabled by default */
+extern int proc_shadow_max(void);
+static int
+vm_shadow_max SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+ int value = 0;
+
+ if (vm_shadow_max_enabled)
+ value = proc_shadow_max();
+
+ return SYSCTL_OUT(req, &value, sizeof(value));
+}
+SYSCTL_PROC(_vm, OID_AUTO, vm_shadow_max, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_LOCKED,
+ 0, 0, &vm_shadow_max, "I", "");
+
+SYSCTL_INT(_vm, OID_AUTO, vm_shadow_max_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_shadow_max_enabled, 0, "");
+
+#endif /* VM_SCAN_FOR_SHADOW_CHAIN */
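+
+/*
+ * Usage sketch: the shadow-chain scan can be expensive, so it stays
+ * disabled until user space opts in. Both sysctl names are as exported
+ * above; error handling is omitted.
+ *
+ *	#include <sys/sysctl.h>
+ *	#include <stdio.h>
+ *
+ *	int
+ *	main(void)
+ *	{
+ *		int one = 1, depth = 0;
+ *		size_t len = sizeof(depth);
+ *
+ *		// enable the scan, then trigger it by reading the depth
+ *		sysctlbyname("vm.vm_shadow_max_enabled", NULL, NULL,
+ *		    &one, sizeof(one));
+ *		sysctlbyname("vm.vm_shadow_max", &depth, &len, NULL, 0);
+ *		printf("deepest shadow chain: %d\n", depth);
+ *		return 0;
+ *	}
+ */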
+SYSCTL_INT(_vm, OID_AUTO, vm_debug_events, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_debug_events, 0, "");
+__attribute__((noinline)) int __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(
+ mach_port_t task_access_port, int32_t calling_pid, uint32_t calling_gid, int32_t target_pid);
/*
* Sysctl's related to data/stack execution. See osfmk/vm/vm_map.c
*/
int vm_shared_region_unnest_log_interval = 10;
int shared_region_unnest_log_count_threshold = 5;
+/*
+ * Shared cache path enforcement.
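+ *
+ * When scdir_enforce is non-zero, the map() path below rejects any shared
+ * cache file whose parent directory is not scdir_path. On SECURE_KERNEL
+ * builds the sysctl below is compiled out, so enforcement cannot be
+ * disabled at run time.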
+ */
+
+static int scdir_enforce = 1;
+static char scdir_path[] = "/var/db/dyld/";
+
+#ifndef SECURE_KERNEL
+SYSCTL_INT(_vm, OID_AUTO, enforce_shared_cache_dir, CTLFLAG_RW | CTLFLAG_LOCKED, &scdir_enforce, 0, "");
+#endif
+
/* These log rate throttling state variables aren't thread safe, but
 * are sufficient for the task.
*/
return allowed;
}
+/*
+ * __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__
+ *
+ * Description: Waits for the user-space daemon to respond to the request
+ *              we made. The function is declared noinline so that it is
+ *              visible in stackshots and spindumps, as well as while
+ *              debugging.
+ */
+__attribute__((noinline)) int __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(
+ mach_port_t task_access_port, int32_t calling_pid, uint32_t calling_gid, int32_t target_pid)
+{
+ return check_task_access(task_access_port, calling_pid, calling_gid, target_pid);
+}
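+
+/*
+ * Design note: because the wrapper above is noinline and distinctively
+ * named, a thread blocked on the task access server appears in stackshots
+ * and spindumps under this name rather than as an anonymous frame inside
+ * check_task_access().
+ */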
+
/*
* Routine: task_for_pid
* Purpose:
}
/* Call up to the task access server */
- error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);
+ error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
if (error != MACH_MSG_SUCCESS) {
if (error == MACH_RCV_INTERRUPTED)
}
/* Call up to the task access server */
- error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);
+ error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
if (error != MACH_MSG_SUCCESS) {
if (error == MACH_RCV_INTERRUPTED)
}
/* Call up to the task access server */
- error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);
+ error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
if (error != MACH_MSG_SUCCESS) {
if (error == MACH_RCV_INTERRUPTED)
SHARED_REGION_TRACE_DEBUG(
("shared_region: %p [%d(%s)] -> check_np(0x%llx)\n",
- current_thread(), p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
(uint64_t)uap->start_address));
/* retrieve the current task's shared region */
("shared_region: %p [%d(%s)] "
"check_np(0x%llx) "
"copyout(0x%llx) error %d\n",
- current_thread(), p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
(uint64_t)uap->start_address, (uint64_t)start_address,
error));
}
SHARED_REGION_TRACE_DEBUG(
("shared_region: %p [%d(%s)] check_np(0x%llx) <- 0x%llx %d\n",
- current_thread(), p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
(uint64_t)uap->start_address, (uint64_t)start_address, error));
return error;
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map(): "
"copyin(0x%llx, %d) failed (error=%d)\n",
- current_thread(), p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
(uint64_t)user_mappings, mappings_count, error));
}
return error;
int error;
kern_return_t kr;
struct fileproc *fp;
- struct vnode *vp, *root_vp;
+ struct vnode *vp, *root_vp, *scdir_vp;
struct vnode_attr va;
off_t fs;
memory_object_size_t file_size;
SHARED_REGION_TRACE_DEBUG(
("shared_region: %p [%d(%s)] -> map\n",
- current_thread(), p->p_pid, p->p_comm));
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm));
shared_region = NULL;
fp = NULL;
vp = NULL;
+ scdir_vp = NULL;
/* get file structure from file descriptor */
error = fp_lookup(p, fd, &fp, 0);
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map: "
"fd=%d lookup failed (error=%d)\n",
- current_thread(), p->p_pid, p->p_comm, fd, error));
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm, fd, error));
goto done;
}
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map: "
"fd=%d not a vnode (type=%d)\n",
- current_thread(), p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
fd, FILEGLOB_DTYPE(fp->f_fglob)));
error = EINVAL;
goto done;
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map: "
"fd=%d not readable\n",
- current_thread(), p->p_pid, p->p_comm, fd));
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm, fd));
error = EPERM;
goto done;
}
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map: "
"fd=%d getwithref failed (error=%d)\n",
- current_thread(), p->p_pid, p->p_comm, fd, error));
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm, fd, error));
goto done;
}
vp = (struct vnode *) fp->f_fglob->fg_data;
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map(%p:'%s'): "
"not a file (type=%d)\n",
- current_thread(), p->p_pid, p->p_comm,
- vp, vp->v_name, vp->v_type));
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(vp),
+ vp->v_name, vp->v_type));
error = EINVAL;
goto done;
}
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map(%p:'%s'): "
"not on process's root volume\n",
- current_thread(), p->p_pid, p->p_comm,
- vp, vp->v_name));
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name));
error = EPERM;
goto done;
}
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map(%p:'%s'): "
"vnode_getattr(%p) failed (error=%d)\n",
- current_thread(), p->p_pid, p->p_comm,
- vp, vp->v_name, vp, error));
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
+ (void *)VM_KERNEL_ADDRPERM(vp), error));
goto done;
}
if (va.va_uid != 0) {
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map(%p:'%s'): "
"owned by uid=%d instead of 0\n",
- current_thread(), p->p_pid, p->p_comm,
- vp, vp->v_name, va.va_uid));
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(vp),
+ vp->v_name, va.va_uid));
error = EPERM;
goto done;
}
+ if (scdir_enforce) {
+ /* get vnode for scdir_path */
+ error = vnode_lookup(scdir_path, 0, &scdir_vp, vfs_context_current());
+ if (error) {
+ SHARED_REGION_TRACE_ERROR(
+ ("shared_region: %p [%d(%s)] map(%p:'%s'): "
+ "vnode_lookup(%s) failed (error=%d)\n",
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
+ scdir_path, error));
+ goto done;
+ }
+
+ /* ensure parent is scdir_vp */
+ if (vnode_parent(vp) != scdir_vp) {
+ SHARED_REGION_TRACE_ERROR(
+ ("shared_region: %p [%d(%s)] map(%p:'%s'): "
+ "shared cache file not in %s\n",
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(vp),
+ vp->v_name, scdir_path));
+ error = EPERM;
+ goto done;
+ }
+ }
+
/* get vnode size */
error = vnode_size(vp, &fs, vfs_context_current());
if (error) {
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map(%p:'%s'): "
"vnode_size(%p) failed (error=%d)\n",
- current_thread(), p->p_pid, p->p_comm,
- vp, vp->v_name, vp, error));
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
+ (void *)VM_KERNEL_ADDRPERM(vp), error));
goto done;
}
file_size = fs;
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map(%p:'%s'): "
"no memory object\n",
- current_thread(), p->p_pid, p->p_comm,
- vp, vp->v_name));
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name));
error = EINVAL;
goto done;
}
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map(%p:'%s'): "
"no shared region\n",
- current_thread(), p->p_pid, p->p_comm,
- vp, vp->v_name));
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name));
goto done;
}
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map(%p:'%s'): "
"vm_shared_region_map_file() failed kr=0x%x\n",
- current_thread(), p->p_pid, p->p_comm,
- vp, vp->v_name, kr));
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, kr));
switch (kr) {
case KERN_INVALID_ADDRESS:
error = EFAULT;
fp_drop(p, fd, fp, 0);
fp = NULL;
}
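+	/* release the iocount taken by vnode_lookup() on scdir_path */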
+ if (scdir_vp != NULL) {
+ (void)vnode_put(scdir_vp);
+ scdir_vp = NULL;
+ }
if (shared_region != NULL) {
vm_shared_region_deallocate(shared_region);
SHARED_REGION_TRACE_DEBUG(
("shared_region: %p [%d(%s)] <- map\n",
- current_thread(), p->p_pid, p->p_comm));
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm));
return error;
}
SHARED_REGION_TRACE_INFO(
("shared_region: %p [%d(%s)] map(): "
"no mappings\n",
- current_thread(), p->p_pid, p->p_comm));
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm));
kr = 0; /* no mappings: we're done ! */
goto done;
} else if (mappings_count <= SFM_MAX_STACK) {
SHARED_REGION_TRACE_ERROR(
("shared_region: %p [%d(%s)] map(): "
"too many mappings (%d)\n",
- current_thread(), p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
mappings_count));
kr = KERN_FAILURE;
goto done;
/* sysctl overflow room */
+SYSCTL_INT(_vm, OID_AUTO, pagesize, CTLFLAG_RD | CTLFLAG_LOCKED,
+ (int *) &page_size, 0, "vm page size");
+
/* vm_page_free_target is provided as a makeshift solution for applications that want to
allocate buffer space, possibly purgeable memory, but not cause inactive pages to be
reclaimed. It allows the app to calculate how much memory is free outside the free target. */
SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_busy, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_busy, 0, "Cleaned pages busy (deactivated)");
SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_nolock, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_nolock, 0, "Cleaned pages no-lock (deactivated)");
+/* counts of pages prefaulted when entering a memory object */
+extern int64_t vm_prefault_nb_pages, vm_prefault_nb_bailout;
+SYSCTL_QUAD(_vm, OID_AUTO, prefault_nb_pages, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_prefault_nb_pages, "");
+SYSCTL_QUAD(_vm, OID_AUTO, prefault_nb_bailout, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_prefault_nb_bailout, "");
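+
+/*
+ * Since these counters are CTLFLAG_RW, a measurement harness can zero
+ * them between runs; a sketch, with the counter names as exported above:
+ *
+ *	#include <stdint.h>
+ *	#include <sys/sysctl.h>
+ *
+ *	static void
+ *	reset_prefault_counters(void)
+ *	{
+ *		int64_t zero = 0;
+ *
+ *		sysctlbyname("vm.prefault_nb_pages", NULL, NULL,
+ *		    &zero, sizeof(zero));
+ *		sysctlbyname("vm.prefault_nb_bailout", NULL, NULL,
+ *		    &zero, sizeof(zero));
+ *	}
+ */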
+
#include <kern/thread.h>
#include <sys/user.h>