X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/39236c6e673c41db228275375ab7fdb0f837b292..ecc0ceb4089d506a0b8d16686a95817b331af9cb:/bsd/vm/vm_unix.c

diff --git a/bsd/vm/vm_unix.c b/bsd/vm/vm_unix.c
index 52f21a6ce..73abe01da 100644
--- a/bsd/vm/vm_unix.c
+++ b/bsd/vm/vm_unix.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
@@ -40,10 +40,11 @@
 #include
+#include
+
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -78,6 +79,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
@@ -98,12 +101,56 @@
 #include
 
-int _shared_region_map( struct proc*, int, unsigned int, struct shared_file_mapping_np*, memory_object_control_t*, struct shared_file_mapping_np*);
+int _shared_region_map_and_slide(struct proc*, int, unsigned int, struct shared_file_mapping_np*, uint32_t, user_addr_t, user_addr_t);
 int shared_region_copyin_mappings(struct proc*, user_addr_t, unsigned int, struct shared_file_mapping_np *);
 
-SYSCTL_INT(_vm, OID_AUTO, vm_debug_events, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_debug_events, 0, "");
+#if DEVELOPMENT || DEBUG
+extern int radar_20146450;
+SYSCTL_INT(_vm, OID_AUTO, radar_20146450, CTLFLAG_RW | CTLFLAG_LOCKED, &radar_20146450, 0, "");
+
+extern int macho_printf;
+SYSCTL_INT(_vm, OID_AUTO, macho_printf, CTLFLAG_RW | CTLFLAG_LOCKED, &macho_printf, 0, "");
+
+extern int apple_protect_pager_data_request_debug;
+SYSCTL_INT(_vm, OID_AUTO, apple_protect_pager_data_request_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &apple_protect_pager_data_request_debug, 0, "");
+
+#endif /* DEVELOPMENT || DEBUG */
+
+SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_compressor, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_compressor, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_compressor_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_compressor_pages, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_terminate, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_terminate, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_terminate_failure, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_terminate_failure, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_should_cow_but_wired, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.should_cow_but_wired, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_extra_cow, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_extra_cow, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_extra_cow_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_extra_cow_pages, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_lookup_failure_write, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_lookup_failure_write, 0, "");
+SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_lookup_failure_copy, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_lookup_failure_copy, 0, "");
+#if VM_SCAN_FOR_SHADOW_CHAIN
+static int vm_shadow_max_enabled = 0;	/* Disabled by default */
+extern int proc_shadow_max(void);
+static int
+vm_shadow_max SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+	int value = 0;
+
+	if (vm_shadow_max_enabled)
+		value = proc_shadow_max();
+
+	return SYSCTL_OUT(req, &value, sizeof(value));
+}
+SYSCTL_PROC(_vm, OID_AUTO, vm_shadow_max, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_LOCKED,
+	    0, 0, &vm_shadow_max, "I", "");
+
+SYSCTL_INT(_vm, OID_AUTO, vm_shadow_max_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_shadow_max_enabled, 0, "");
+
+#endif /* VM_SCAN_FOR_SHADOW_CHAIN */
+SYSCTL_INT(_vm, OID_AUTO, vm_debug_events, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_debug_events, 0, "");
+
+__attribute__((noinline)) int __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(
+	mach_port_t task_access_port, int32_t calling_pid, uint32_t calling_gid, int32_t target_pid);
 
 /*
  * Sysctl's related to data/stack execution.  See osfmk/vm/vm_map.c
  */
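The hunk above exports a batch of read-only VM counters plus a SYSCTL_PROC gated by vm_shadow_max_enabled. A minimal userspace sketch of reading one of these nodes with sysctlbyname(3); vm.vm_debug_events is registered unconditionally here, while the radar_20146450/macho_printf and vm_shadow_max nodes exist only on DEVELOPMENT/DEBUG or VM_SCAN_FOR_SHADOW_CHAIN kernels, so absence is a normal outcome:

    #include <stdio.h>
    #include <sys/sysctl.h>

    int main(void)
    {
        int value = 0;
        size_t len = sizeof(value);

        /* Query the sysctl registered above by name; this fails (ENOENT)
         * on kernels that do not register the node. */
        if (sysctlbyname("vm.vm_debug_events", &value, &len, NULL, 0) == 0)
            printf("vm.vm_debug_events = %d\n", value);
        else
            perror("sysctlbyname");
        return 0;
    }
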
@@ -113,6 +160,7 @@ extern int allow_stack_exec, allow_data_exec;
 SYSCTL_INT(_vm, OID_AUTO, allow_stack_exec, CTLFLAG_RW | CTLFLAG_LOCKED, &allow_stack_exec, 0, "");
 SYSCTL_INT(_vm, OID_AUTO, allow_data_exec, CTLFLAG_RW | CTLFLAG_LOCKED, &allow_data_exec, 0, "");
+
 #endif /* !SECURE_KERNEL */
 
 static const char *prot_values[] = {
@@ -141,15 +189,28 @@ SYSCTL_INT(_vm, OID_AUTO, shared_region_unnest_logging, CTLFLAG_RW | CTLFLAG_LOC
 int vm_shared_region_unnest_log_interval = 10;
 int shared_region_unnest_log_count_threshold = 5;
 
+/*
+ * Shared cache path enforcement.
+ */
+
+static int scdir_enforce = 1;
+static char scdir_path[] = "/var/db/dyld/";
+
+#ifndef SECURE_KERNEL
+SYSCTL_INT(_vm, OID_AUTO, enforce_shared_cache_dir, CTLFLAG_RW | CTLFLAG_LOCKED, &scdir_enforce, 0, "");
+#endif
+
 /* These log rate throttling state variables aren't thread safe, but
  * are sufficient unto the task.
  */
 static int64_t last_unnest_log_time = 0;
 static int shared_region_unnest_log_count = 0;
 
-void log_unnest_badness(vm_map_t m, vm_map_offset_t s, vm_map_offset_t e) {
-	struct timeval tv;
-	const char *pcommstr;
+void log_unnest_badness(
+	vm_map_t m,
+	vm_map_offset_t s,
+	vm_map_offset_t e) {
+	struct timeval tv;
 
 	if (shared_region_unnest_logging == 0)
 		return;
@@ -166,9 +227,7 @@ void log_unnest_badness(vm_map_t m, vm_map_offset_t s, vm_map_offset_t e) {
 		}
 	}
 
-	pcommstr = current_proc()->p_comm;
-
-	printf("%s (map: %p) triggered DYLD shared region unnest for map: %p, region 0x%qx->0x%qx. While not abnormal for debuggers, this increases system memory footprint until the target exits.\n", current_proc()->p_comm, get_task_map(current_proc()->task), m, (uint64_t)s, (uint64_t)e);
+	printf("%s[%d] triggered unnest of range 0x%qx->0x%qx of DYLD shared region in VM map %p. While not abnormal for debuggers, this increases system memory footprint until the target exits.\n", current_proc()->p_comm, current_proc()->p_pid, (uint64_t)s, (uint64_t)e, (void *) VM_KERNEL_ADDRPERM(m));
 }
 
 int
@@ -203,7 +262,7 @@ vslock(
 			vm_map_page_mask(map)),
 		 vm_map_round_page(addr+len,
 				   vm_map_page_mask(map)),
-		 VM_PROT_READ | VM_PROT_WRITE,
+		 VM_PROT_READ | VM_PROT_WRITE | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_BSD),
		 FALSE);
 
	switch (kret) {
@@ -544,6 +603,19 @@ out:
 	return allowed;
 }
 
+/*
+ * __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__
+ *
+ * Description:	Waits for the user space daemon to respond to the request
+ *		we made. Function declared non-inline to be visible in
+ *		stackshots and spindumps as well as debugging.
+ */
+__attribute__((noinline)) int __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(
+	mach_port_t task_access_port, int32_t calling_pid, uint32_t calling_gid, int32_t target_pid)
+{
+	return check_task_access(task_access_port, calling_pid, calling_gid, target_pid);
+}
+
 /*
  * Routine:	task_for_pid
 * Purpose:
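The hunk above introduces a deliberately loud, non-inlined wrapper around check_task_access() so that threads blocked on the taskgated upcall show up by name in stackshots and spindumps; the call sites below are rewired through it. A compilable userspace sketch of the same pattern (the function name and pipe-based "backend" are hypothetical, purely illustrative):

    #include <stdio.h>
    #include <unistd.h>

    /* Give the blocking frame a unique, searchable name and forbid
     * inlining, so sampling tools show exactly where the thread waits. */
    __attribute__((noinline))
    static long __WAITING_ON_BACKEND_REPLY__(int reply_fd)
    {
        char byte;
        /* Blocks until the peer writes one reply byte. */
        return (long)read(reply_fd, &byte, sizeof(byte));
    }

    int main(void)
    {
        int fds[2];
        if (pipe(fds) != 0)
            return 1;
        write(fds[1], "k", 1);               /* pretend the daemon replied */
        printf("got %ld byte(s)\n", __WAITING_ON_BACKEND_REPLY__(fds[0]));
        return 0;
    }
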
@@ -618,7 +690,7 @@ task_for_pid(
 		}
 
 		/* Call up to the task access server */
-		error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);
+		error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
 
 		if (error != MACH_MSG_SUCCESS) {
 			if (error == MACH_RCV_INTERRUPTED)
@@ -793,7 +865,7 @@ pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret)
 		}
 
 		/* Call up to the task access server */
-		error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);
+		error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
 
 		if (error != MACH_MSG_SUCCESS) {
 			if (error == MACH_RCV_INTERRUPTED)
@@ -877,7 +949,7 @@ pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret)
 		}
 
 		/* Call up to the task access server */
-		error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);
+		error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);
 
 		if (error != MACH_MSG_SUCCESS) {
 			if (error == MACH_RCV_INTERRUPTED)
@@ -889,6 +961,7 @@
 		}
 	}
 
+
 	task_reference(target);
 
 #if CONFIG_MEMORYSTATUS
@@ -1004,7 +1077,8 @@ shared_region_check_np(
 
 	SHARED_REGION_TRACE_DEBUG(
 		("shared_region: %p [%d(%s)] -> check_np(0x%llx)\n",
-		 current_thread(), p->p_pid, p->p_comm,
+		 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+		 p->p_pid, p->p_comm,
 		 (uint64_t)uap->start_address));
 
 	/* retrieve the current tasks's shared region */
@@ -1025,7 +1099,8 @@
 				("shared_region: %p [%d(%s)] "
 				 "check_np(0x%llx) "
 				 "copyout(0x%llx) error %d\n",
-				 current_thread(), p->p_pid, p->p_comm,
+				 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+				 p->p_pid, p->p_comm,
 				 (uint64_t)uap->start_address,
 				 (uint64_t)start_address, error));
 		}
@@ -1038,7 +1113,8 @@
 
 	SHARED_REGION_TRACE_DEBUG(
 		("shared_region: %p [%d(%s)] check_np(0x%llx) <- 0x%llx %d\n",
-		 current_thread(), p->p_pid, p->p_comm,
+		 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+		 p->p_pid, p->p_comm,
 		 (uint64_t)uap->start_address,
 		 (uint64_t)start_address, error));
 
 	return error;
@@ -1064,7 +1140,8 @@
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map(): "
			 "copyin(0x%llx, %d) failed (error=%d)\n",
-			 current_thread(), p->p_pid, p->p_comm,
+			 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+			 p->p_pid, p->p_comm,
 			 (uint64_t)user_mappings, mappings_count, error));
 	}
 	return error;
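Throughout these hunks, raw kernel pointers in trace output are replaced with VM_KERNEL_ADDRPERM(...) values, so logs stay correlatable across messages without disclosing real kernel addresses. A userspace sketch of the idea (the fixed constant below is hypothetical; the kernel derives its permutation secret randomly at boot):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the kernel's per-boot secret. */
    static const uintptr_t addr_perm_secret = 0x5a5a5a5a5a5aUL;

    /* Return a stable per-run alias of a pointer for logging, rather
     * than the raw address. */
    static uintptr_t addr_perm(const void *p)
    {
        return (p == NULL) ? 0 : (uintptr_t)p + addr_perm_secret;
    }

    int main(void)
    {
        int obj;
        printf("object at %p logged as 0x%lx\n",
               (void *)&obj, (unsigned long)addr_perm(&obj));
        return 0;
    }
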
@@ -1080,18 +1157,19 @@
  * requiring any further setup.
  */
 int
-_shared_region_map(
+_shared_region_map_and_slide(
 	struct proc				*p,
 	int					fd,
 	uint32_t				mappings_count,
 	struct shared_file_mapping_np		*mappings,
-	memory_object_control_t			*sr_file_control,
-	struct shared_file_mapping_np		*mapping_to_slide)
+	uint32_t				slide,
+	user_addr_t				slide_start,
+	user_addr_t				slide_size)
 {
 	int					error;
 	kern_return_t				kr;
 	struct fileproc				*fp;
-	struct vnode				*vp, *root_vp;
+	struct vnode				*vp, *root_vp, *scdir_vp;
 	struct vnode_attr			va;
 	off_t					fs;
 	memory_object_size_t			file_size;
@@ -1103,11 +1181,13 @@
 
 	SHARED_REGION_TRACE_DEBUG(
 		("shared_region: %p [%d(%s)] -> map\n",
-		 current_thread(), p->p_pid, p->p_comm));
+		 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+		 p->p_pid, p->p_comm));
 
 	shared_region = NULL;
 	fp = NULL;
 	vp = NULL;
+	scdir_vp = NULL;
 
 	/* get file structure from file descriptor */
 	error = fp_lookup(p, fd, &fp, 0);
@@ -1115,7 +1195,8 @@
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map: "
 			 "fd=%d lookup failed (error=%d)\n",
-			 current_thread(), p->p_pid, p->p_comm, fd, error));
+			 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+			 p->p_pid, p->p_comm, fd, error));
 		goto done;
 	}
 
@@ -1124,7 +1205,8 @@
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map: "
 			 "fd=%d not a vnode (type=%d)\n",
-			 current_thread(), p->p_pid, p->p_comm,
+			 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+			 p->p_pid, p->p_comm,
 			 fd, FILEGLOB_DTYPE(fp->f_fglob)));
 		error = EINVAL;
 		goto done;
@@ -1135,7 +1217,8 @@
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map: "
 			 "fd=%d not readable\n",
-			 current_thread(), p->p_pid, p->p_comm, fd));
+			 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+			 p->p_pid, p->p_comm, fd));
 		error = EPERM;
 		goto done;
 	}
@@ -1146,7 +1229,8 @@
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map: "
 			 "fd=%d getwithref failed (error=%d)\n",
-			 current_thread(), p->p_pid, p->p_comm, fd, error));
+			 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+			 p->p_pid, p->p_comm, fd, error));
 		goto done;
 	}
 	vp = (struct vnode *) fp->f_fglob->fg_data;
@@ -1156,15 +1240,19 @@
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map(%p:'%s'): "
 			 "not a file (type=%d)\n",
-			 current_thread(), p->p_pid, p->p_comm,
-			 vp, vp->v_name, vp->v_type));
+			 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+			 p->p_pid, p->p_comm,
+			 (void *)VM_KERNEL_ADDRPERM(vp),
+			 vp->v_name, vp->v_type));
 		error = EINVAL;
 		goto done;
 	}
 
 #if CONFIG_MACF
+	/* pass in 0 for the offset argument because AMFI does not need the offset
+	   of the shared cache */
 	error = mac_file_check_mmap(vfs_context_ucred(vfs_context_current()),
-				    fp->f_fglob, VM_PROT_ALL, MAP_FILE, &maxprot);
+				    fp->f_fglob, VM_PROT_ALL, MAP_FILE, 0, &maxprot);
 	if (error) {
 		goto done;
 	}
@@ -1196,8 +1284,9 @@
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map(%p:'%s'): "
 			 "not on process's root volume\n",
-			 current_thread(), p->p_pid, p->p_comm,
-			 vp, vp->v_name));
+			 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+			 p->p_pid, p->p_comm,
+			 (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name));
 		error = EPERM;
 		goto done;
 	}
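The next hunk adds the shared-cache directory enforcement: unless scdir_enforce (vm.enforce_shared_cache_dir) is cleared, the cache file's parent vnode must be scdir_path, "/var/db/dyld/". A rough userspace analogue of that parent-directory check (the cache file name in main() is illustrative only):

    #include <libgen.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Resolve the file's parent directory and require it to be exactly
     * the trusted path, mirroring vnode_parent(vp) != scdir_vp below. */
    static int in_trusted_dir(const char *path, const char *trusted)
    {
        char resolved[PATH_MAX], parent[PATH_MAX];
        if (realpath(path, resolved) == NULL)
            return 0;
        strlcpy(parent, dirname(resolved), sizeof(parent));
        return strcmp(parent, trusted) == 0;
    }

    int main(int argc, char **argv)
    {
        const char *path = (argc > 1) ? argv[1]
                                      : "/var/db/dyld/dyld_shared_cache_x86_64";
        printf("%s %s in /var/db/dyld\n", path,
               in_trusted_dir(path, "/var/db/dyld") ? "is" : "is NOT");
        return 0;
    }
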
@@ -1210,28 +1299,62 @@
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map(%p:'%s'): "
 			 "vnode_getattr(%p) failed (error=%d)\n",
-			 current_thread(), p->p_pid, p->p_comm,
-			 vp, vp->v_name, vp, error));
+			 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+			 p->p_pid, p->p_comm,
+			 (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
+			 (void *)VM_KERNEL_ADDRPERM(vp), error));
 		goto done;
 	}
 	if (va.va_uid != 0) {
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map(%p:'%s'): "
 			 "owned by uid=%d instead of 0\n",
-			 current_thread(), p->p_pid, p->p_comm,
-			 vp, vp->v_name, va.va_uid));
+			 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+			 p->p_pid, p->p_comm,
+			 (void *)VM_KERNEL_ADDRPERM(vp),
+			 vp->v_name, va.va_uid));
 		error = EPERM;
 		goto done;
 	}
 
+	if (scdir_enforce) {
+		/* get vnode for scdir_path */
+		error = vnode_lookup(scdir_path, 0, &scdir_vp, vfs_context_current());
+		if (error) {
+			SHARED_REGION_TRACE_ERROR(
+				("shared_region: %p [%d(%s)] map(%p:'%s'): "
+				 "vnode_lookup(%s) failed (error=%d)\n",
+				 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+				 p->p_pid, p->p_comm,
+				 (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
+				 scdir_path, error));
+			goto done;
+		}
+
+		/* ensure parent is scdir_vp */
+		if (vnode_parent(vp) != scdir_vp) {
+			SHARED_REGION_TRACE_ERROR(
+				("shared_region: %p [%d(%s)] map(%p:'%s'): "
+				 "shared cache file not in %s\n",
+				 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+				 p->p_pid, p->p_comm,
+				 (void *)VM_KERNEL_ADDRPERM(vp),
+				 vp->v_name, scdir_path));
+			error = EPERM;
+			goto done;
+		}
+	}
+
 	/* get vnode size */
 	error = vnode_size(vp, &fs, vfs_context_current());
 	if (error) {
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map(%p:'%s'): "
 			 "vnode_size(%p) failed (error=%d)\n",
-			 current_thread(), p->p_pid, p->p_comm,
-			 vp, vp->v_name, vp, error));
+			 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+			 p->p_pid, p->p_comm,
+			 (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
+			 (void *)VM_KERNEL_ADDRPERM(vp), error));
 		goto done;
 	}
 	file_size = fs;
@@ -1242,17 +1365,13 @@
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map(%p:'%s'): "
 			 "no memory object\n",
-			 current_thread(), p->p_pid, p->p_comm,
-			 vp, vp->v_name));
+			 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+			 p->p_pid, p->p_comm,
+			 (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name));
 		error = EINVAL;
 		goto done;
 	}
 
-	if (sr_file_control != NULL) {
-		*sr_file_control = file_control;
-	}
-
-
 	/* get the process's shared region (setup in vm_map_exec()) */
 	shared_region = vm_shared_region_get(current_task());
 
@@ -1260,8 +1379,9 @@
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map(%p:'%s'): "
 			 "no shared region\n",
-			 current_thread(), p->p_pid, p->p_comm,
-			 vp, vp->v_name));
+			 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+			 p->p_pid, p->p_comm,
+			 (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name));
 		goto done;
 	}
 
@@ -1272,13 +1392,16 @@
 					 file_control,
 					 file_size,
 					 (void *) p->p_fd->fd_rdir,
-					 mapping_to_slide);
+					 slide,
+					 slide_start,
+					 slide_size);
 	if (kr != KERN_SUCCESS) {
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map(%p:'%s'): "
 			 "vm_shared_region_map_file() failed kr=0x%x\n",
-			 current_thread(), p->p_pid, p->p_comm,
-			 vp, vp->v_name, kr));
+			 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+			 p->p_pid, p->p_comm,
+			 (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, kr));
 		switch (kr) {
 		case KERN_INVALID_ADDRESS:
 			error = EFAULT;
@@ -1333,6 +1456,10 @@ done:
 		fp_drop(p, fd, fp, 0);
 		fp = NULL;
 	}
+	if (scdir_vp != NULL) {
+		(void)vnode_put(scdir_vp);
+		scdir_vp = NULL;
+	}
 
 	if (shared_region != NULL) {
 		vm_shared_region_deallocate(shared_region);
@@ -1340,7 +1467,8 @@
 
 	SHARED_REGION_TRACE_DEBUG(
 		("shared_region: %p [%d(%s)] <- map\n",
-		 current_thread(), p->p_pid, p->p_comm));
+		 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+		 p->p_pid, p->p_comm));
 
 	return error;
 }
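When vm_shared_region_map_file() fails, the function translates the Mach return code into an errno, per the switch visible above; the diff shows only the KERN_INVALID_ADDRESS case. A compilable sketch of that translation (the EINVAL fallback is an assumption for illustration, not taken from the diff):

    #include <errno.h>
    #include <stdio.h>
    #include <mach/kern_return.h>

    /* Map a Mach kern_return_t to a BSD errno, as done after the
     * vm_shared_region_map_file() failure above. */
    static int kr_to_errno(kern_return_t kr)
    {
        switch (kr) {
        case KERN_SUCCESS:         return 0;
        case KERN_INVALID_ADDRESS: return EFAULT;   /* case shown in the diff */
        default:                   return EINVAL;   /* assumed fallback */
        }
    }

    int main(void)
    {
        printf("KERN_INVALID_ADDRESS -> errno %d\n",
               kr_to_errno(KERN_INVALID_ADDRESS));
        return 0;
    }
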
@@ -1351,11 +1479,8 @@ shared_region_map_and_slide_np(
 	struct shared_region_map_and_slide_np_args	*uap,
 	__unused int					*retvalp)
 {
-	struct shared_file_mapping_np			mapping_to_slide;
 	struct shared_file_mapping_np			*mappings;
-	unsigned int					mappings_count = uap->count;
-
-	memory_object_control_t				sr_file_control;
+	unsigned int					mappings_count = uap->count;
 	kern_return_t					kr = KERN_SUCCESS;
 	uint32_t					slide = uap->slide;
 
@@ -1384,7 +1509,8 @@
 		SHARED_REGION_TRACE_INFO(
 			("shared_region: %p [%d(%s)] map(): "
 			 "no mappings\n",
-			 current_thread(), p->p_pid, p->p_comm));
+			 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+			 p->p_pid, p->p_comm));
 		kr = 0;	/* no mappings: we're done ! */
 		goto done;
 	} else if (mappings_count <= SFM_MAX_STACK) {
@@ -1393,7 +1519,8 @@
 		SHARED_REGION_TRACE_ERROR(
 			("shared_region: %p [%d(%s)] map(): "
 			 "too many mappings (%d)\n",
-			 current_thread(), p->p_pid, p->p_comm,
+			 (void *)VM_KERNEL_ADDRPERM(current_thread()),
+			 p->p_pid, p->p_comm,
 			 mappings_count));
 		kr = KERN_FAILURE;
 		goto done;
@@ -1404,29 +1531,22 @@
 	}
 
 
-	kr = _shared_region_map(p, uap->fd, mappings_count, mappings, &sr_file_control, &mapping_to_slide);
+	kr = _shared_region_map_and_slide(p, uap->fd, mappings_count, mappings,
+					  slide,
+					  uap->slide_start, uap->slide_size);
 	if (kr != KERN_SUCCESS) {
 		return kr;
 	}
 
-	if (slide) {
-		kr = vm_shared_region_slide(slide,
-				mapping_to_slide.sfm_file_offset,
-				mapping_to_slide.sfm_size,
-				uap->slide_start,
-				uap->slide_size,
-				sr_file_control);
-		if (kr != KERN_SUCCESS) {
-			vm_shared_region_undo_mappings(NULL, 0, mappings, mappings_count);
-			return kr;
-		}
-	}
 done:
 	return kr;
 }
 
 /* sysctl overflow room */
 
+SYSCTL_INT (_vm, OID_AUTO, pagesize, CTLFLAG_RD | CTLFLAG_LOCKED,
+	    (int *) &page_size, 0, "vm page size");
+
 /* vm_page_free_target is provided as a makeshift solution for applications that want to
	allocate buffer space, possibly purgeable memory, but not cause inactive pages to be
	reclaimed. It allows the app to calculate how much memory is free outside the free target. */
@@ -1459,6 +1579,10 @@ extern unsigned int	vm_page_purgeable_wired_count;
 SYSCTL_INT(_vm, OID_AUTO, page_purgeable_wired_count, CTLFLAG_RD | CTLFLAG_LOCKED,
	   &vm_page_purgeable_wired_count, 0, "Wired purgeable page count");
 
+extern unsigned int	vm_pageout_purged_objects;
+SYSCTL_INT(_vm, OID_AUTO, pageout_purged_objects, CTLFLAG_RD | CTLFLAG_LOCKED,
+	   &vm_pageout_purged_objects, 0, "System purged object count");
+
 extern int madvise_free_debug;
 SYSCTL_INT(_vm, OID_AUTO, madvise_free_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
	   &madvise_free_debug, 0, "zero-fill on madvise(MADV_FREE*)");
@@ -1529,6 +1653,11 @@
 SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_commit_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_commit_reactivated, 0, "Cleaned pages commit reactivated");
 SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_busy, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_busy, 0, "Cleaned pages busy (deactivated)");
 SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_nolock, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_nolock, 0, "Cleaned pages no-lock (deactivated)");
 
+/* counts of pages prefaulted when entering a memory object */
+extern int64_t vm_prefault_nb_pages, vm_prefault_nb_bailout;
+SYSCTL_QUAD(_vm, OID_AUTO, prefault_nb_pages, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_prefault_nb_pages, "");
+SYSCTL_QUAD(_vm, OID_AUTO, prefault_nb_bailout, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_prefault_nb_bailout, "");
+
 #include
 #include
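The final hunk registers the prefault counters via SYSCTL_QUAD, which exports 64-bit values. A minimal userspace reader (these nodes exist only on kernels that carry this change, so failure is expected elsewhere):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/sysctl.h>

    int main(void)
    {
        int64_t pages = 0, bailout = 0;
        size_t len = sizeof(pages);

        /* Read both 64-bit counters registered above. */
        if (sysctlbyname("vm.prefault_nb_pages", &pages, &len, NULL, 0) != 0)
            perror("vm.prefault_nb_pages");
        len = sizeof(bailout);
        if (sysctlbyname("vm.prefault_nb_bailout", &bailout, &len, NULL, 0) != 0)
            perror("vm.prefault_nb_bailout");

        printf("prefaulted: %lld, bailouts: %lld\n",
               (long long)pages, (long long)bailout);
        return 0;
    }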