/*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* is included in support of clause 2.2 (b) of the Apple Public License,
* Version 2.0.
*/
-
-#include <meta_features.h>
-
#include <vm/vm_options.h>
#include <kern/task.h>
#include <kern/extmod_statistics.h>
#include <mach/mach_traps.h>
#include <mach/port.h>
+#include <mach/sdt.h>
#include <mach/task.h>
#include <mach/task_access.h>
#include <mach/task_special_ports.h>
#include <sys/kas_info.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
+#if NECP
+#include <net/necp.h>
+#endif /* NECP */
#include <security/audit/audit.h>
#include <security/mac.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
-#include <machine/spl.h>
-
#include <mach/shared_region.h>
#include <vm/vm_shared_region.h>
#include <sys/kern_memorystatus.h>
+#if CONFIG_MACF
+#include <security/mac_framework.h>
+#endif
int _shared_region_map_and_slide(struct proc*, int, unsigned int, struct shared_file_mapping_np*, uint32_t, user_addr_t, user_addr_t);
int shared_region_copyin_mappings(struct proc*, user_addr_t, unsigned int, struct shared_file_mapping_np *);
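+
+/*
+ * Optional vm_map debug toggles: extra logging for "apple protect"
+ * (encrypted-page) mappings and for the 4K-page ("fourk") compatibility
+ * pager, each compiled in only when the matching debug option is set.
+ */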
+#if VM_MAP_DEBUG_APPLE_PROTECT
+SYSCTL_INT(_vm, OID_AUTO, map_debug_apple_protect, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_map_debug_apple_protect, 0, "");
+#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
+
+#if VM_MAP_DEBUG_FOURK
+SYSCTL_INT(_vm, OID_AUTO, map_debug_fourk, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_map_debug_fourk, 0, "");
+#endif /* VM_MAP_DEBUG_FOURK */
+
+#if DEVELOPMENT || DEBUG
+
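+/*
+ * Test knob: writing a byte count to vm.kmem_alloc_contig attempts a
+ * physically contiguous kernel allocation of that size and frees it again
+ * on success.  An allocation failure is not reported back to user space;
+ * only sysctl-level errors are returned.
+ */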
+static int
+sysctl_kmem_alloc_contig SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ vm_offset_t kaddr;
+ kern_return_t kr;
+ int error = 0;
+ int size = 0;
+
+ error = sysctl_handle_int(oidp, &size, 0, req);
+ if (error || !req->newptr)
+ return (error);
+
+ kr = kmem_alloc_contig(kernel_map, &kaddr, (vm_size_t)size, 0, 0, 0, 0, VM_KERN_MEMORY_IOKIT);
+
+ if (kr == KERN_SUCCESS)
+ kmem_free(kernel_map, kaddr, size);
+
+ return error;
+}
+
+SYSCTL_PROC(_vm, OID_AUTO, kmem_alloc_contig, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED,
+ 0, 0, &sysctl_kmem_alloc_contig, "I", "");
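+
+/*
+ * Minimal user-space sketch (assuming a DEVELOPMENT || DEBUG kernel):
+ *
+ *	int size = 2 * 1024 * 1024;	// try a 2MB contiguous allocation
+ *	sysctlbyname("vm.kmem_alloc_contig", NULL, NULL, &size, sizeof(size));
+ */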
+
+extern int vm_region_footprint;
+SYSCTL_INT(_vm, OID_AUTO, region_footprint, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_region_footprint, 0, "");
+
+#endif /* DEVELOPMENT || DEBUG */
+
+
+#if CONFIG_EMBEDDED
+
+#if DEVELOPMENT || DEBUG
+extern int panic_on_unsigned_execute;
+SYSCTL_INT(_vm, OID_AUTO, panic_on_unsigned_execute, CTLFLAG_RW | CTLFLAG_LOCKED, &panic_on_unsigned_execute, 0, "");
+#endif /* DEVELOPMENT || DEBUG */
+
+extern int log_executable_mem_entry;
+extern int cs_executable_create_upl;
+extern int cs_executable_mem_entry;
+extern int cs_executable_wire;
+SYSCTL_INT(_vm, OID_AUTO, log_executable_mem_entry, CTLFLAG_RD | CTLFLAG_LOCKED, &log_executable_mem_entry, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, cs_executable_create_upl, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_create_upl, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, cs_executable_mem_entry, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_mem_entry, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, cs_executable_wire, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_wire, 0, "");
+#endif /* CONFIG_EMBEDDED */
+
#if DEVELOPMENT || DEBUG
extern int radar_20146450;
SYSCTL_INT(_vm, OID_AUTO, radar_20146450, CTLFLAG_RW | CTLFLAG_LOCKED, &radar_20146450, 0, "");
extern int apple_protect_pager_data_request_debug;
SYSCTL_INT(_vm, OID_AUTO, apple_protect_pager_data_request_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &apple_protect_pager_data_request_debug, 0, "");
+#if __arm__ || __arm64__
+/* These are meant to support the page table accounting unit test. */
+extern unsigned int arm_hardware_page_size;
+extern unsigned int arm_pt_desc_size;
+extern unsigned int arm_pt_root_size;
+extern unsigned int free_page_size_tt_count;
+extern unsigned int free_two_page_size_tt_count;
+extern unsigned int free_tt_count;
+extern unsigned int inuse_user_tteroot_count;
+extern unsigned int inuse_kernel_tteroot_count;
+extern unsigned int inuse_user_ttepages_count;
+extern unsigned int inuse_kernel_ttepages_count;
+extern unsigned int inuse_user_ptepages_count;
+extern unsigned int inuse_kernel_ptepages_count;
+SYSCTL_UINT(_vm, OID_AUTO, native_hw_pagesize, CTLFLAG_RD | CTLFLAG_LOCKED, &arm_hardware_page_size, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, arm_pt_desc_size, CTLFLAG_RD | CTLFLAG_LOCKED, &arm_pt_desc_size, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, arm_pt_root_size, CTLFLAG_RD | CTLFLAG_LOCKED, &arm_pt_root_size, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, free_1page_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &free_page_size_tt_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, free_2page_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &free_two_page_size_tt_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, free_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &free_tt_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, user_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_user_tteroot_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, kernel_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_kernel_tteroot_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, user_tte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_user_ttepages_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, kernel_tte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_kernel_ttepages_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, user_pte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_user_ptepages_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, kernel_pte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_kernel_ptepages_count, 0, "");
+#endif /* __arm__ || __arm64__ */
+
+#if __arm64__
+extern int fourk_pager_data_request_debug;
+SYSCTL_INT(_vm, OID_AUTO, fourk_pager_data_request_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &fourk_pager_data_request_debug, 0, "");
+#endif /* __arm64__ */
#endif /* DEVELOPMENT || DEBUG */
SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_compressor, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_compressor, 0, "");
* Sysctl's related to data/stack execution. See osfmk/vm/vm_map.c
*/
-#ifndef SECURE_KERNEL
+#if DEVELOPMENT || DEBUG
extern int allow_stack_exec, allow_data_exec;
SYSCTL_INT(_vm, OID_AUTO, allow_stack_exec, CTLFLAG_RW | CTLFLAG_LOCKED, &allow_stack_exec, 0, "");
SYSCTL_INT(_vm, OID_AUTO, allow_data_exec, CTLFLAG_RW | CTLFLAG_LOCKED, &allow_data_exec, 0, "");
-#endif /* !SECURE_KERNEL */
+#if __arm64__
+extern int fourk_binary_compatibility_unsafe;
+extern int fourk_binary_compatibility_allow_wx;
+SYSCTL_INT(_vm, OID_AUTO, fourk_binary_compatibility_unsafe, CTLFLAG_RW | CTLFLAG_LOCKED, &fourk_binary_compatibility_unsafe, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, fourk_binary_compatibility_allow_wx, CTLFLAG_RW | CTLFLAG_LOCKED, &fourk_binary_compatibility_allow_wx, 0, "");
+#endif /* __arm64__ */
+#endif /* DEVELOPMENT || DEBUG */
static const char *prot_values[] = {
"none",
current_proc()->p_comm, current_proc()->p_pid, vaddr, prot_values[prot & VM_PROT_ALL]);
}
+/*
+ * shared_region_unnest_logging: level of logging of unnesting events
+ * 0 - no logging
+ * 1 - throttled logging of unexpected unnesting events (default)
+ * 2 - unthrottled logging of unexpected unnesting events
+ * 3+ - unthrottled logging of all unnesting events
+ */
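+/*
+ * e.g. "sysctl -w vm.shared_region_unnest_logging=3" (root only) logs
+ * every unnesting event with no throttling.
+ */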
int shared_region_unnest_logging = 1;
SYSCTL_INT(_vm, OID_AUTO, shared_region_unnest_logging, CTLFLAG_RW | CTLFLAG_LOCKED,
* Shared cache path enforcement.
*/
+#ifndef CONFIG_EMBEDDED
static int scdir_enforce = 1;
static char scdir_path[] = "/var/db/dyld/";
+#else
+static int scdir_enforce = 0;
+static char scdir_path[] = "/System/Library/Caches/com.apple.dyld/";
+#endif
#ifndef SECURE_KERNEL
SYSCTL_INT(_vm, OID_AUTO, enforce_shared_cache_dir, CTLFLAG_RW | CTLFLAG_LOCKED, &scdir_enforce, 0, "");
static int64_t last_unnest_log_time = 0;
static int shared_region_unnest_log_count = 0;
-void log_unnest_badness(
+void
+log_unnest_badness(
vm_map_t m,
vm_map_offset_t s,
- vm_map_offset_t e) {
+ vm_map_offset_t e,
+ boolean_t is_nested_map,
+ vm_map_offset_t lowest_unnestable_addr)
+{
struct timeval tv;
if (shared_region_unnest_logging == 0)
return;
- if (shared_region_unnest_logging == 1) {
+ if (shared_region_unnest_logging <= 2 &&
+ is_nested_map &&
+ s >= lowest_unnestable_addr) {
+ /*
+ * Unnesting of writable map entries is fine.
+ */
+ return;
+ }
+
+ if (shared_region_unnest_logging <= 1) {
microtime(&tv);
- if ((tv.tv_sec - last_unnest_log_time) < vm_shared_region_unnest_log_interval) {
- if (shared_region_unnest_log_count++ > shared_region_unnest_log_count_threshold)
+ if ((tv.tv_sec - last_unnest_log_time) <
+ vm_shared_region_unnest_log_interval) {
+ if (shared_region_unnest_log_count++ >
+ shared_region_unnest_log_count_threshold)
return;
- }
- else {
+ } else {
last_unnest_log_time = tv.tv_sec;
shared_region_unnest_log_count = 0;
}
}
+ DTRACE_VM4(log_unnest_badness,
+ vm_map_t, m,
+ vm_map_offset_t, s,
+ vm_map_offset_t, e,
+ vm_map_offset_t, lowest_unnestable_addr);
printf("%s[%d] triggered unnest of range 0x%qx->0x%qx of DYLD shared region in VM map %p. While not abnormal for debuggers, this increases system memory footprint until the target exits.\n", current_proc()->p_comm, current_proc()->p_pid, (uint64_t)s, (uint64_t)e, (void *) VM_KERNEL_ADDRPERM(m));
}
vm_map_t map;
map = current_map();
- kret = vm_map_wire(map,
+ kret = vm_map_wire_kernel(map,
vm_map_trunc_page(addr,
vm_map_page_mask(map)),
vm_map_round_page(addr+len,
vm_map_page_mask(map)),
- VM_PROT_READ | VM_PROT_WRITE | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_BSD),
+ VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_BSD,
FALSE);
switch (kret) {
AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
AUDIT_ARG(mach_port1, t);
- t1 = port_name_to_task(t);
+ t1 = port_name_to_task_inspect(t);
if (t1 == TASK_NULL) {
err = KERN_FAILURE;
if (p) {
pid = proc_pid(p);
err = KERN_SUCCESS;
- } else {
+ } else if (is_corpsetask(t1)) {
+ pid = task_pid(t1);
+ err = KERN_SUCCESS;
+	} else {
err = KERN_FAILURE;
}
}
extmod_statistics_incr_task_for_pid(p->task);
sright = (void *) convert_task_to_port(p->task);
+
+ /* Check if the task has been corpsified */
+ if (is_corpsetask(p->task)) {
+ ipc_port_release_send(sright);
+ error = KERN_FAILURE;
+ goto tfpout;
+ }
+
tret = ipc_port_copyout_send(
sright,
get_task_ipcspace(current_task()));
}
target = targetproc->task;
+#ifndef CONFIG_EMBEDDED
if (target != TASK_NULL) {
mach_port_t tfpport;
}
}
}
+#endif
task_reference(target);
error = task_pidsuspend(target);
}
target = targetproc->task;
+#ifndef CONFIG_EMBEDDED
if (target != TASK_NULL) {
mach_port_t tfpport;
}
}
}
+#endif
+#if CONFIG_EMBEDDED
+#if SOCKETS
+ resume_proc_sockets(targetproc);
+#endif /* SOCKETS */
+#endif /* CONFIG_EMBEDDED */
task_reference(target);
return error;
}
+#if CONFIG_EMBEDDED
+/*
+ * Freeze the specified process (provided in args->pid), or find and freeze a PID.
+ * When a process is specified, this call is blocking, otherwise we wake up the
+ * freezer thread and do not block on a process being frozen.
+ * The special pids are handled without a target process: -1 nudges the
+ * freezer thread via memorystatus_on_inactivity(), -2 compresses eligible
+ * anonymous pages directly via vm_pageout_anonymous_pages().
+ */
+kern_return_t
+pid_hibernate(struct proc *p __unused, struct pid_hibernate_args *args, int *ret)
+{
+ int error = 0;
+ proc_t targetproc = PROC_NULL;
+ int pid = args->pid;
+
+#ifndef CONFIG_FREEZE
+ #pragma unused(pid)
+#else
+
+#if CONFIG_MACF
+ error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_HIBERNATE);
+ if (error) {
+ error = EPERM;
+ goto out;
+ }
+#endif
+
+ /*
+ * If a pid has been provided, we obtain the process handle and call task_for_pid_posix_check().
+ */
+
+ if (pid >= 0) {
+ targetproc = proc_find(pid);
+
+ if (targetproc == PROC_NULL) {
+ error = ESRCH;
+ goto out;
+ }
+
+ if (!task_for_pid_posix_check(targetproc)) {
+ error = EPERM;
+ goto out;
+ }
+ }
+
+ if (pid == -2) {
+ vm_pageout_anonymous_pages();
+ } else if (pid == -1) {
+ memorystatus_on_inactivity(targetproc);
+ } else {
+ error = memorystatus_freeze_process_sync(targetproc);
+ }
+
+out:
+
+#endif /* CONFIG_FREEZE */
+
+ if (targetproc != PROC_NULL)
+ proc_rele(targetproc);
+ *ret = error;
+ return error;
+}
+#endif /* CONFIG_EMBEDDED */
+
+#if SOCKETS
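+/*
+ * Per-process callout: defunct, at the requested level, every socket that
+ * belongs to the target process, was last used by the target pid, or was
+ * delegated to it (and, under NECP, the target's network policy fds).
+ */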
+static int
+shutdown_sockets_callout(proc_t p, void *arg)
+{
+ struct pid_shutdown_sockets_args *args = arg;
+ int pid = args->pid;
+ int level = args->level;
+ struct filedesc *fdp;
+ struct fileproc *fp;
+ int i;
+
+ proc_fdlock(p);
+ fdp = p->p_fd;
+ for (i = 0; i < fdp->fd_nfiles; i++) {
+ fp = fdp->fd_ofiles[i];
+ if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
+ continue;
+ }
+ if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) {
+ struct socket *so = (struct socket *)fp->f_fglob->fg_data;
+ if (p->p_pid == pid || so->last_pid == pid ||
+ ((so->so_flags & SOF_DELEGATED) && so->e_pid == pid)) {
+ /* Call networking stack with socket and level */
+ (void) socket_defunct(p, so, level);
+ }
+ }
+#if NECP
+ else if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_NETPOLICY &&
+ p->p_pid == pid) {
+ necp_defunct_client(p, fp);
+ }
+#endif /* NECP */
+ }
+ proc_fdunlock(p);
+
+ return (PROC_RETURNED);
+}
+
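+/*
+ * pid_shutdown_sockets: subject to MAC and task_for_pid policy checks,
+ * defunct the target pid's sockets across all processes at args->level
+ * (service-level or full disconnect).
+ */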
+int
+pid_shutdown_sockets(struct proc *p __unused, struct pid_shutdown_sockets_args *args, int *ret)
+{
+ int error = 0;
+ proc_t targetproc = PROC_NULL;
+ int pid = args->pid;
+ int level = args->level;
+
+ if (level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC &&
+ level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL) {
+ error = EINVAL;
+ goto out;
+ }
+
+#if CONFIG_MACF
+ error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_SHUTDOWN_SOCKETS);
+ if (error) {
+ error = EPERM;
+ goto out;
+ }
+#endif
+
+ targetproc = proc_find(pid);
+ if (targetproc == PROC_NULL) {
+ error = ESRCH;
+ goto out;
+ }
+
+ if (!task_for_pid_posix_check(targetproc)) {
+ error = EPERM;
+ goto out;
+ }
+
+ proc_iterate(PROC_ALLPROCLIST | PROC_NOWAITTRANS, shutdown_sockets_callout, args, NULL, NULL);
+
+out:
+ if (targetproc != PROC_NULL)
+ proc_rele(targetproc);
+ *ret = error;
+ return error;
+}
+
+#endif /* SOCKETS */
static int
sysctl_settfp_policy(__unused struct sysctl_oid *oidp, void *arg1,
#endif
memory_object_control_t file_control;
struct vm_shared_region *shared_region;
+ uint32_t i;
SHARED_REGION_TRACE_DEBUG(
("shared_region: %p [%d(%s)] -> map\n",
}
#endif /* MAC */
-#if CONFIG_PROTECT
- /* check for content protection access */
- {
- error = cp_handle_vnop(vp, CP_READ_ACCESS | CP_WRITE_ACCESS, 0);
- if (error) {
- goto done;
- }
- }
-#endif /* CONFIG_PROTECT */
-
/* make sure vnode is on the process's root volume */
root_vp = p->p_fd->fd_rdir;
if (root_vp == NULL) {
goto done;
}
+ /* check that the mappings are properly covered by code signatures */
+ if (!cs_enforcement(NULL)) {
+ /* code signing is not enforced: no need to check */
+ } else for (i = 0; i < mappings_count; i++) {
+ if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
+ /* zero-filled mapping: not backed by the file */
+ continue;
+ }
+ if (ubc_cs_is_range_codesigned(vp,
+ mappings[i].sfm_file_offset,
+ mappings[i].sfm_size)) {
+ /* this mapping is fully covered by code signatures */
+ continue;
+ }
+ SHARED_REGION_TRACE_ERROR(
+ ("shared_region: %p [%d(%s)] map(%p:'%s'): "
+ "mapping #%d/%d [0x%llx:0x%llx:0x%llx:0x%x:0x%x] "
+ "is not code-signed\n",
+ (void *)VM_KERNEL_ADDRPERM(current_thread()),
+ p->p_pid, p->p_comm,
+ (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
+ i, mappings_count,
+ mappings[i].sfm_address,
+ mappings[i].sfm_size,
+ mappings[i].sfm_file_offset,
+ mappings[i].sfm_max_prot,
+ mappings[i].sfm_init_prot));
+ error = EINVAL;
+ goto done;
+ }
/* get the process's shared region (setup in vm_map_exec()) */
shared_region = vm_shared_region_get(current_task());
&vm_page_stats_reusable.reusable_pages_success, "");
SYSCTL_QUAD(_vm, OID_AUTO, reusable_failure, CTLFLAG_RD | CTLFLAG_LOCKED,
&vm_page_stats_reusable.reusable_pages_failure, "");
-SYSCTL_QUAD(_vm, OID_AUTO, reusable_shared, CTLFLAG_RD | CTLFLAG_LOCKED,
+SYSCTL_QUAD(_vm, OID_AUTO, reusable_pages_shared, CTLFLAG_RD | CTLFLAG_LOCKED,
&vm_page_stats_reusable.reusable_pages_shared, "");
SYSCTL_QUAD(_vm, OID_AUTO, all_reusable_calls, CTLFLAG_RD | CTLFLAG_LOCKED,
&vm_page_stats_reusable.all_reusable_calls, "");
&vm_page_stats_reusable.can_reuse_failure, "");
SYSCTL_QUAD(_vm, OID_AUTO, reusable_reclaimed, CTLFLAG_RD | CTLFLAG_LOCKED,
&vm_page_stats_reusable.reusable_reclaimed, "");
+SYSCTL_QUAD(_vm, OID_AUTO, reusable_nonwritable, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vm_page_stats_reusable.reusable_nonwritable, "");
+SYSCTL_QUAD(_vm, OID_AUTO, reusable_shared, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vm_page_stats_reusable.reusable_shared, "");
+SYSCTL_QUAD(_vm, OID_AUTO, free_shared, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &vm_page_stats_reusable.free_shared, "");
extern unsigned int vm_page_free_count, vm_page_speculative_count;
extern unsigned int vm_page_cleaned_count;
SYSCTL_UINT(_vm, OID_AUTO, page_cleaned_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_cleaned_count, 0, "Cleaned queue size");
+extern unsigned int vm_page_pageable_internal_count, vm_page_pageable_external_count;
+SYSCTL_UINT(_vm, OID_AUTO, page_pageable_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pageable_internal_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_pageable_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pageable_external_count, 0, "");
+
/* pageout counts */
extern unsigned int vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external, vm_pageout_inactive_clean, vm_pageout_speculative_clean, vm_pageout_inactive_used;
extern unsigned int vm_pageout_freed_from_inactive_clean, vm_pageout_freed_from_speculative;
SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_cleaned, 0, "");
/* counts of pages entering the cleaned queue */
-extern unsigned int vm_pageout_enqueued_cleaned, vm_pageout_enqueued_cleaned_from_inactive_clean, vm_pageout_enqueued_cleaned_from_inactive_dirty;
+extern unsigned int vm_pageout_enqueued_cleaned, vm_pageout_enqueued_cleaned_from_inactive_dirty;
-SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned, 0, ""); /* sum of next two */
+SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned, 0, "");
-SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned_from_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned_from_inactive_clean, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned_from_inactive_dirty, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned_from_inactive_dirty, 0, "");
/* counts of pages leaving the cleaned queue */
SYSCTL_QUAD(_vm, OID_AUTO, prefault_nb_pages, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_prefault_nb_pages, "");
SYSCTL_QUAD(_vm, OID_AUTO, prefault_nb_bailout, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_prefault_nb_bailout, "");
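+
+/*
+ * Free-page "clump" statistics (x86_64): the page allocator keeps
+ * physically contiguous free pages grouped in clumps; these counters track
+ * allocations by clump size plus insert/promote activity.
+ */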
+#if defined (__x86_64__)
+extern unsigned int vm_clump_promote_threshold;
+SYSCTL_UINT(_vm, OID_AUTO, vm_clump_promote_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_clump_promote_threshold, 0, "clump size threshold for promotes");
+#if DEVELOPMENT || DEBUG
+extern unsigned long vm_clump_stats[];
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats1, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[1], "free page allocations from clump of 1 page");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats2, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[2], "free page allocations from clump of 2 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats3, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[3], "free page allocations from clump of 3 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats4, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[4], "free page allocations from clump of 4 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats5, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[5], "free page allocations from clump of 5 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats6, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[6], "free page allocations from clump of 6 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats7, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[7], "free page allocations from clump of 7 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats8, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[8], "free page allocations from clump of 8 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats9, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[9], "free page allocations from clump of 9 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats10, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[10], "free page allocations from clump of 10 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats11, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[11], "free page allocations from clump of 11 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats12, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[12], "free page allocations from clump of 12 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats13, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[13], "free page allocations from clump of 13 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats14, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[14], "free page allocations from clump of 14 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats15, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[15], "free page allocations from clump of 15 pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats16, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[16], "free page allocations from clump of 16 pages");
+extern unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_alloc, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_allocs, "free page allocations");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_inserts, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_inserts, "free page insertions");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_inrange, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_inrange, "free page insertions that are part of vm_pages");
+SYSCTL_LONG(_vm, OID_AUTO, vm_clump_promotes, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_promotes, "pages promoted to head");
+#endif /* if DEVELOPMENT || DEBUG */
+#endif /* #if defined (__x86_64__) */
+
+#if CONFIG_SECLUDED_MEMORY
+
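+/*
+ * Secluded-memory counters: the secluded pool holds pages set aside for
+ * tasks entitled to use them; these sysctls expose its target size and the
+ * grab/failure statistics below.
+ */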
+SYSCTL_UINT(_vm, OID_AUTO, num_tasks_can_use_secluded_mem, CTLFLAG_RD | CTLFLAG_LOCKED, &num_tasks_can_use_secluded_mem, 0, "");
+extern unsigned int vm_page_secluded_target;
+extern unsigned int vm_page_secluded_count;
+extern unsigned int vm_page_secluded_count_free;
+extern unsigned int vm_page_secluded_count_inuse;
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_target, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_target, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_count, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_count_free, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_count_free, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_count_inuse, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_count_inuse, 0, "");
+
+extern struct vm_page_secluded_data vm_page_secluded;
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_eligible, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.eligible_for_secluded, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_success_free, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_success_free, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_success_other, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_success_other, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_failure_locked, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_failure_locked, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_failure_state, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_failure_state, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_failure_dirty, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_failure_dirty, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_for_iokit, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_for_iokit, 0, "");
+SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_for_iokit_success, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_for_iokit_success, 0, "");
+
+extern uint64_t vm_pageout_secluded_burst_count;
+SYSCTL_QUAD(_vm, OID_AUTO, pageout_secluded_burst_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_secluded_burst_count, "");
+
+#endif /* CONFIG_SECLUDED_MEMORY */
+
#include <kern/thread.h>
#include <sys/user.h>
return 0;
#endif /* !SECURE_KERNEL */
}
+
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wcast-qual"
+#pragma clang diagnostic ignored "-Wunused-function"
+
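+/*
+ * The SYSCTL_ULONG registrations below read these addresses through an
+ * unsigned long pointer, so assert at compile time that the widths match.
+ */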
+static void
+asserts(void)
+{
+	static_assert(sizeof(vm_min_kernel_address) == sizeof(unsigned long));
+	static_assert(sizeof(vm_max_kernel_address) == sizeof(unsigned long));
+}
+
+SYSCTL_ULONG(_vm, OID_AUTO, vm_min_kernel_address, CTLFLAG_RD, (unsigned long *) &vm_min_kernel_address, "");
+SYSCTL_ULONG(_vm, OID_AUTO, vm_max_kernel_address, CTLFLAG_RD, (unsigned long *) &vm_max_kernel_address, "");
+#pragma clang diagnostic pop
+
+extern uint32_t vm_page_pages;
+SYSCTL_UINT(_vm, OID_AUTO, pages, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pages, 0, "");
+
+#if (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG)
+extern void pmap_footprint_suspend(vm_map_t map, boolean_t suspend);
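+/*
+ * Write-only knob: a non-zero write suspends physical-footprint accounting
+ * for the caller's address space, a zero write resumes it; reads are no-ops.
+ */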
+static int
+sysctl_vm_footprint_suspend SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int error = 0;
+ int new_value;
+
+ if (req->newptr == USER_ADDR_NULL) {
+ return 0;
+ }
+ error = SYSCTL_IN(req, &new_value, sizeof(int));
+ if (error) {
+ return error;
+ }
+ pmap_footprint_suspend(current_map(), new_value);
+ return 0;
+}
+SYSCTL_PROC(_vm, OID_AUTO, footprint_suspend,
+ CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_LOCKED|CTLFLAG_MASKED,
+ 0, 0, &sysctl_vm_footprint_suspend, "I", "");
+#endif /* (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG) */