X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/ebb1b9f42b62218f29061826217bb0f71cd375a6..ea3f04195ba4a5034c9c8e9b726d4f7ce96f1832:/osfmk/vm/vm_map.c

diff --git a/osfmk/vm/vm_map.c b/osfmk/vm/vm_map.c
index 2e1fbe691..ab0d87614 100644
--- a/osfmk/vm/vm_map.c
+++ b/osfmk/vm/vm_map.c
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- *
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,34 +22,34 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- *
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  */
-/*
+/*
  * Mach Operating System
  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
  * All Rights Reserved.
- *
+ *
  * Permission to use, copy, modify and distribute this software and its
  * documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
- *
+ *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
  * Carnegie Mellon requests users of this software to return to
- *
+ *
  * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
  * School of Computer Science
  * Carnegie Mellon University
  * Pittsburgh PA 15213-3890
- *
+ *
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
*/ @@ -65,6 +65,9 @@ #include #include + +#include + #include #include @@ -79,23 +82,26 @@ #include #include +#include #include +#include #include #include #include +#include +#include #include #include #include #include #include #include +#include #include #include #include #include -#include -#include #include #include @@ -106,193 +112,233 @@ #include #include +#include + +#include +#include +#if DEVELOPMENT || DEBUG +extern int proc_selfcsflags(void); +#if CONFIG_EMBEDDED +extern int panic_on_unsigned_execute; +#endif /* CONFIG_EMBEDDED */ +#endif /* DEVELOPMENT || DEBUG */ + +#if __arm64__ +extern const int fourk_binary_compatibility_unsafe; +extern const int fourk_binary_compatibility_allow_wx; +#endif /* __arm64__ */ +extern int proc_selfpid(void); +extern char *proc_name_address(void *p); + +#if VM_MAP_DEBUG_APPLE_PROTECT +int vm_map_debug_apple_protect = 0; +#endif /* VM_MAP_DEBUG_APPLE_PROTECT */ +#if VM_MAP_DEBUG_FOURK +int vm_map_debug_fourk = 0; +#endif /* VM_MAP_DEBUG_FOURK */ + +SECURITY_READ_ONLY_LATE(int) vm_map_executable_immutable = 1; +int vm_map_executable_immutable_verbose = 0; + +os_refgrp_decl(static, map_refgrp, "vm_map", NULL); + +extern u_int32_t random(void); /* from */ /* Internal prototypes */ static void vm_map_simplify_range( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end); /* forward */ - -static boolean_t vm_map_range_check( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_map_entry_t *entry); - -static vm_map_entry_t _vm_map_entry_create( - struct vm_map_header *map_header); - -static void _vm_map_entry_dispose( - struct vm_map_header *map_header, - vm_map_entry_t entry); - -static void vm_map_pmap_enter( - vm_map_t map, - vm_map_offset_t addr, - vm_map_offset_t end_addr, - vm_object_t object, - vm_object_offset_t offset, - vm_prot_t protection); - -static void _vm_map_clip_end( - struct vm_map_header *map_header, - vm_map_entry_t entry, - vm_map_offset_t end); - -static void _vm_map_clip_start( - struct vm_map_header *map_header, - vm_map_entry_t entry, - vm_map_offset_t start); - -static void vm_map_entry_delete( - vm_map_t map, - vm_map_entry_t entry); - -static kern_return_t vm_map_delete( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - int flags, - vm_map_t zap_map); - -static kern_return_t vm_map_copy_overwrite_unaligned( - vm_map_t dst_map, - vm_map_entry_t entry, - vm_map_copy_t copy, - vm_map_address_t start); - -static kern_return_t vm_map_copy_overwrite_aligned( - vm_map_t dst_map, - vm_map_entry_t tmp_entry, - vm_map_copy_t copy, + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end); /* forward */ + +static boolean_t vm_map_range_check( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_map_entry_t *entry); + +static vm_map_entry_t _vm_map_entry_create( + struct vm_map_header *map_header, boolean_t map_locked); + +static void _vm_map_entry_dispose( + struct vm_map_header *map_header, + vm_map_entry_t entry); + +static void vm_map_pmap_enter( + vm_map_t map, + vm_map_offset_t addr, + vm_map_offset_t end_addr, + vm_object_t object, + vm_object_offset_t offset, + vm_prot_t protection); + +static void _vm_map_clip_end( + struct vm_map_header *map_header, + vm_map_entry_t entry, + vm_map_offset_t end); + +static void _vm_map_clip_start( + struct vm_map_header *map_header, + vm_map_entry_t entry, + vm_map_offset_t start); + +static void vm_map_entry_delete( + vm_map_t map, + vm_map_entry_t entry); + +static kern_return_t vm_map_delete( + vm_map_t map, + vm_map_offset_t 
start, + vm_map_offset_t end, + int flags, + vm_map_t zap_map); + +static void vm_map_copy_insert( + vm_map_t map, + vm_map_entry_t after_where, + vm_map_copy_t copy); + +static kern_return_t vm_map_copy_overwrite_unaligned( + vm_map_t dst_map, + vm_map_entry_t entry, + vm_map_copy_t copy, + vm_map_address_t start, + boolean_t discard_on_success); + +static kern_return_t vm_map_copy_overwrite_aligned( + vm_map_t dst_map, + vm_map_entry_t tmp_entry, + vm_map_copy_t copy, vm_map_offset_t start, - pmap_t pmap); + pmap_t pmap); -static kern_return_t vm_map_copyin_kernel_buffer( - vm_map_t src_map, +static kern_return_t vm_map_copyin_kernel_buffer( + vm_map_t src_map, vm_map_address_t src_addr, - vm_map_size_t len, - boolean_t src_destroy, - vm_map_copy_t *copy_result); /* OUT */ - -static kern_return_t vm_map_copyout_kernel_buffer( - vm_map_t map, - vm_map_address_t *addr, /* IN/OUT */ - vm_map_copy_t copy, - boolean_t overwrite); - -static void vm_map_fork_share( - vm_map_t old_map, - vm_map_entry_t old_entry, - vm_map_t new_map); - -static boolean_t vm_map_fork_copy( - vm_map_t old_map, - vm_map_entry_t *old_entry_p, - vm_map_t new_map); - -void vm_map_region_top_walk( - vm_map_entry_t entry, - vm_region_top_info_t top); - -void vm_map_region_walk( - vm_map_t map, - vm_map_offset_t va, - vm_map_entry_t entry, - vm_object_offset_t offset, - vm_object_size_t range, - vm_region_extended_info_t extended, - boolean_t look_for_pages); - -static kern_return_t vm_map_wire_nested( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t access_type, - boolean_t user_wire, - pmap_t map_pmap, - vm_map_offset_t pmap_addr); - -static kern_return_t vm_map_unwire_nested( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - boolean_t user_wire, - pmap_t map_pmap, - vm_map_offset_t pmap_addr); - -static kern_return_t vm_map_overwrite_submap_recurse( - vm_map_t dst_map, - vm_map_offset_t dst_addr, - vm_map_size_t dst_size); - -static kern_return_t vm_map_copy_overwrite_nested( - vm_map_t dst_map, - vm_map_offset_t dst_addr, - vm_map_copy_t copy, - boolean_t interruptible, - pmap_t pmap, - boolean_t discard_on_success); - -static kern_return_t vm_map_remap_extract( - vm_map_t map, - vm_map_offset_t addr, - vm_map_size_t size, - boolean_t copy, - struct vm_map_header *map_header, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance, - boolean_t pageable); - -static kern_return_t vm_map_remap_range_allocate( - vm_map_t map, - vm_map_address_t *address, - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_map_entry_t *map_entry); - -static void vm_map_region_look_for_page( - vm_map_t map, + vm_map_size_t len, + boolean_t src_destroy, + vm_map_copy_t *copy_result); /* OUT */ + +static kern_return_t vm_map_copyout_kernel_buffer( + vm_map_t map, + vm_map_address_t *addr, /* IN/OUT */ + vm_map_copy_t copy, + vm_map_size_t copy_size, + boolean_t overwrite, + boolean_t consume_on_success); + +static void vm_map_fork_share( + vm_map_t old_map, + vm_map_entry_t old_entry, + vm_map_t new_map); + +static boolean_t vm_map_fork_copy( + vm_map_t old_map, + vm_map_entry_t *old_entry_p, + vm_map_t new_map, + int vm_map_copyin_flags); + +static kern_return_t vm_map_wire_nested( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t caller_prot, + vm_tag_t tag, + boolean_t user_wire, + pmap_t map_pmap, + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p); + +static kern_return_t vm_map_unwire_nested( + vm_map_t map, + vm_map_offset_t 
start, + vm_map_offset_t end, + boolean_t user_wire, + pmap_t map_pmap, + vm_map_offset_t pmap_addr); + +static kern_return_t vm_map_overwrite_submap_recurse( + vm_map_t dst_map, + vm_map_offset_t dst_addr, + vm_map_size_t dst_size); + +static kern_return_t vm_map_copy_overwrite_nested( + vm_map_t dst_map, + vm_map_offset_t dst_addr, + vm_map_copy_t copy, + boolean_t interruptible, + pmap_t pmap, + boolean_t discard_on_success); + +static kern_return_t vm_map_remap_extract( + vm_map_t map, + vm_map_offset_t addr, + vm_map_size_t size, + boolean_t copy, + struct vm_map_header *map_header, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance, + boolean_t pageable, + boolean_t same_map, + vm_map_kernel_flags_t vmk_flags); + +static kern_return_t vm_map_remap_range_allocate( + vm_map_t map, + vm_map_address_t *address, + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + vm_map_entry_t *map_entry); + +static void vm_map_region_look_for_page( + vm_map_t map, vm_map_offset_t va, - vm_object_t object, - vm_object_offset_t offset, + vm_object_t object, + vm_object_offset_t offset, int max_refcnt, int depth, - vm_region_extended_info_t extended); + vm_region_extended_info_t extended, + mach_msg_type_number_t count); + +static int vm_map_region_count_obj_refs( + vm_map_entry_t entry, + vm_object_t object); + -static int vm_map_region_count_obj_refs( - vm_map_entry_t entry, - vm_object_t object); +static kern_return_t vm_map_willneed( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end); +static kern_return_t vm_map_reuse_pages( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end); -static kern_return_t vm_map_willneed( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end); +static kern_return_t vm_map_reusable_pages( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end); -static kern_return_t vm_map_reuse_pages( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end); +static kern_return_t vm_map_can_reuse( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end); -static kern_return_t vm_map_reusable_pages( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end); +#if MACH_ASSERT +static kern_return_t vm_map_pageout( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end); +#endif /* MACH_ASSERT */ -static kern_return_t vm_map_can_reuse( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end); +static void vm_map_corpse_footprint_destroy( + vm_map_t map); -#if CONFIG_FREEZE -struct default_freezer_table; -__private_extern__ void* default_freezer_mapping_create(vm_object_t, vm_offset_t); -__private_extern__ void default_freezer_mapping_free(void**, boolean_t all); -#endif +pid_t find_largest_process_vm_map_entries(void); /* * Macros to copy a vm_map_entry. We must be careful to correctly @@ -303,29 +349,112 @@ __private_extern__ void default_freezer_mapping_free(void**, boolean_t all); * wire count; it's used for map splitting and zone changing in * vm_map_copyout. */ -#define vm_map_entry_copy(NEW,OLD) \ -MACRO_BEGIN \ + +#if CONFIG_EMBEDDED + +/* + * The "used_for_jit" flag was copied from OLD to NEW in vm_map_entry_copy(). + * But for security reasons on embedded platforms, we don't want the + * new mapping to be "used for jit", so we always reset the flag here. + * Same for "pmap_cs_associated". 
+ */ +#define VM_MAP_ENTRY_COPY_CODE_SIGNING(NEW, OLD) \ +MACRO_BEGIN \ + (NEW)->used_for_jit = FALSE; \ + (NEW)->pmap_cs_associated = FALSE; \ +MACRO_END + +#else /* CONFIG_EMBEDDED */ + +/* + * The "used_for_jit" flag was copied from OLD to NEW in vm_map_entry_copy(). + * On macOS, the new mapping can be "used for jit". + */ +#define VM_MAP_ENTRY_COPY_CODE_SIGNING(NEW, OLD) \ +MACRO_BEGIN \ + assert((NEW)->used_for_jit == (OLD)->used_for_jit); \ + assert((NEW)->pmap_cs_associated == FALSE); \ +MACRO_END + +#endif /* CONFIG_EMBEDDED */ + +#define vm_map_entry_copy(NEW, OLD) \ +MACRO_BEGIN \ +boolean_t _vmec_reserved = (NEW)->from_reserved_zone; \ *(NEW) = *(OLD); \ - (NEW)->is_shared = FALSE; \ + (NEW)->is_shared = FALSE; \ (NEW)->needs_wakeup = FALSE; \ (NEW)->in_transition = FALSE; \ (NEW)->wired_count = 0; \ (NEW)->user_wired_count = 0; \ - (NEW)->permanent = FALSE; \ + (NEW)->permanent = FALSE; \ + VM_MAP_ENTRY_COPY_CODE_SIGNING((NEW),(OLD)); \ + (NEW)->from_reserved_zone = _vmec_reserved; \ + if ((NEW)->iokit_acct) { \ + assertf(!(NEW)->use_pmap, "old %p new %p\n", (OLD), (NEW)); \ + (NEW)->iokit_acct = FALSE; \ + (NEW)->use_pmap = TRUE; \ + } \ + (NEW)->vme_resilient_codesign = FALSE; \ + (NEW)->vme_resilient_media = FALSE; \ + (NEW)->vme_atomic = FALSE; \ + (NEW)->vme_no_copy_on_read = FALSE; \ MACRO_END -#define vm_map_entry_copy_full(NEW,OLD) (*(NEW) = *(OLD)) +#define vm_map_entry_copy_full(NEW, OLD) \ +MACRO_BEGIN \ +boolean_t _vmecf_reserved = (NEW)->from_reserved_zone; \ +(*(NEW) = *(OLD)); \ +(NEW)->from_reserved_zone = _vmecf_reserved; \ +MACRO_END + +/* + * Normal lock_read_to_write() returns FALSE/0 on failure. + * These functions evaluate to zero on success and non-zero value on failure. + */ +__attribute__((always_inline)) +int +vm_map_lock_read_to_write(vm_map_t map) +{ + if (lck_rw_lock_shared_to_exclusive(&(map)->lock)) { + DTRACE_VM(vm_map_lock_upgrade); + return 0; + } + return 1; +} + +__attribute__((always_inline)) +boolean_t +vm_map_try_lock(vm_map_t map) +{ + if (lck_rw_try_lock_exclusive(&(map)->lock)) { + DTRACE_VM(vm_map_lock_w); + return TRUE; + } + return FALSE; +} + +__attribute__((always_inline)) +boolean_t +vm_map_try_lock_read(vm_map_t map) +{ + if (lck_rw_try_lock_shared(&(map)->lock)) { + DTRACE_VM(vm_map_lock_r); + return TRUE; + } + return FALSE; +} /* * Decide if we want to allow processes to execute from their data or stack areas. - * override_nx() returns true if we do. Data/stack execution can be enabled independently + * override_nx() returns true if we do. Data/stack execution can be enabled independently * for 32 and 64 bit processes. Set the VM_ABI_32 or VM_ABI_64 flags in allow_data_exec * or allow_stack_exec to enable data execution for that type of data area for that particular * ABI (or both by or'ing the flags together). These are initialized in the architecture - * specific pmap files since the default behavior varies according to architecture. The - * main reason it varies is because of the need to provide binary compatibility with old - * applications that were written before these restrictions came into being. In the old - * days, an app could execute anything it could read, but this has slowly been tightened + * specific pmap files since the default behavior varies according to architecture. The + * main reason it varies is because of the need to provide binary compatibility with old + * applications that were written before these restrictions came into being. 
In the old + * days, an app could execute anything it could read, but this has slowly been tightened * up over time. The default behavior is: * * 32-bit PPC apps may execute from both stack and data areas @@ -333,9 +462,9 @@ MACRO_END * 64-bit PPC/Intel apps may not execute from either data or stack * * An application on any architecture may override these defaults by explicitly - * adding PROT_EXEC permission to the page in question with the mprotect(2) + * adding PROT_EXEC permission to the page in question with the mprotect(2) * system call. This code here just determines what happens when an app tries to - * execute from a page that lacks execute permission. + * execute from a page that lacks execute permission. * * Note that allow_data_exec or allow_stack_exec may also be modified by sysctl to change the * default behavior for both 32 and 64 bit apps on a system-wide basis. Furthermore, @@ -353,22 +482,28 @@ override_nx(vm_map_t map, uint32_t user_tag) /* map unused on arm */ { int current_abi; + if (map->pmap == kernel_pmap) { + return FALSE; + } + /* * Determine if the app is running in 32 or 64 bit mode. */ - if (vm_map_is_64bit(map)) + if (vm_map_is_64bit(map)) { current_abi = VM_ABI_64; - else + } else { current_abi = VM_ABI_32; + } /* - * Determine if we should allow the execution based on whether it's a + * Determine if we should allow the execution based on whether it's a * stack or data area and the current architecture. */ - if (user_tag == VM_MEMORY_STACK) + if (user_tag == VM_MEMORY_STACK) { return allow_stack_exec & current_abi; + } return (allow_data_exec & current_abi) && (map->map_disallow_data_exec == FALSE); } @@ -391,7 +526,7 @@ override_nx(vm_map_t map, uint32_t user_tag) /* map unused on arm */ * the asymmetric (delayed) strategy is used for shared temporary * objects instead of the symmetric (shadow) strategy. All maps * are now "top level" maps (either task map, kernel map or submap - * of the kernel map). + * of the kernel map). * * Since portions of maps are specified by start/end addreses, * which may not align with existing map entries, all @@ -417,10 +552,11 @@ override_nx(vm_map_t map, uint32_t user_tag) /* map unused on arm */ * vm_object_copy_strategically() in vm_object.c. */ -static zone_t vm_map_zone; /* zone for vm_map structures */ -static zone_t vm_map_entry_zone; /* zone for vm_map_entry structures */ -static zone_t vm_map_kentry_zone; /* zone for kernel entry structures */ -static zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */ +static zone_t vm_map_zone; /* zone for vm_map structures */ +zone_t vm_map_entry_zone; /* zone for vm_map_entry structures */ +static zone_t vm_map_entry_reserved_zone; /* zone with reserve for non-blocking allocations */ +static zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */ +zone_t vm_map_holes_zone; /* zone for vm map holes (vm_map_links) structures */ /* @@ -429,16 +565,17 @@ static zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */ * vm_map_submap creates the submap. 
*/ -vm_object_t vm_submap_object; +vm_object_t vm_submap_object; -static void *map_data; -static vm_size_t map_data_size; -static void *kentry_data; -static vm_size_t kentry_data_size; -static int kentry_count = 2048; /* to init kentry_data_size */ +static void *map_data; +static vm_size_t map_data_size; +static void *kentry_data; +static vm_size_t kentry_data_size; +static void *map_holes_data; +static vm_size_t map_holes_data_size; #if CONFIG_EMBEDDED -#define NO_COALESCE_LIMIT 0 +#define NO_COALESCE_LIMIT 0 #else #define NO_COALESCE_LIMIT ((1024 * 128) - 1) #endif @@ -450,12 +587,12 @@ unsigned int vm_map_set_cache_attr_count = 0; kern_return_t vm_map_set_cache_attr( - vm_map_t map, - vm_map_offset_t va) + vm_map_t map, + vm_map_offset_t va) { - vm_map_entry_t map_entry; - vm_object_t object; - kern_return_t kr = KERN_SUCCESS; + vm_map_entry_t map_entry; + vm_object_t object; + kern_return_t kr = KERN_SUCCESS; vm_map_lock_read(map); @@ -467,7 +604,7 @@ vm_map_set_cache_attr( kr = KERN_INVALID_ARGUMENT; goto done; } - object = map_entry->object.vm_object; + object = VME_OBJECT(map_entry); if (object == VM_OBJECT_NULL) { /* @@ -491,7 +628,7 @@ done: #if CONFIG_CODE_DECRYPTION /* * vm_map_apple_protected: - * This remaps the requested part of the object with an object backed by + * This remaps the requested part of the object with an object backed by * the decrypting pager. * crypt_info contains entry points and session data for the crypt module. * The crypt_info block will be copied by vm_map_apple_protected. The data structures @@ -499,96 +636,240 @@ done: */ kern_return_t vm_map_apple_protected( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_object_offset_t crypto_backing_offset, struct pager_crypt_info *crypt_info) { - boolean_t map_locked; - kern_return_t kr; - vm_map_entry_t map_entry; - memory_object_t protected_mem_obj; - vm_object_t protected_object; - vm_map_offset_t map_addr; - - vm_map_lock_read(map); - map_locked = TRUE; - - /* lookup the protected VM object */ - if (!vm_map_lookup_entry(map, - start, - &map_entry) || - map_entry->vme_end < end || - map_entry->is_sub_map) { - /* that memory is not properly mapped */ - kr = KERN_INVALID_ARGUMENT; - goto done; - } - protected_object = map_entry->object.vm_object; - if (protected_object == VM_OBJECT_NULL) { - /* there should be a VM object here at this point */ - kr = KERN_INVALID_ARGUMENT; - goto done; - } + boolean_t map_locked; + kern_return_t kr; + vm_map_entry_t map_entry; + struct vm_map_entry tmp_entry; + memory_object_t unprotected_mem_obj; + vm_object_t protected_object; + vm_map_offset_t map_addr; + vm_map_offset_t start_aligned, end_aligned; + vm_object_offset_t crypto_start, crypto_end; + int vm_flags; + vm_map_kernel_flags_t vmk_flags; + + vm_flags = 0; + vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; - /* make sure protected object stays alive while map is unlocked */ - vm_object_reference(protected_object); - - vm_map_unlock_read(map); map_locked = FALSE; + unprotected_mem_obj = MEMORY_OBJECT_NULL; + start_aligned = vm_map_trunc_page(start, PAGE_MASK_64); + end_aligned = vm_map_round_page(end, PAGE_MASK_64); + start_aligned = vm_map_trunc_page(start_aligned, VM_MAP_PAGE_MASK(map)); + end_aligned = vm_map_round_page(end_aligned, VM_MAP_PAGE_MASK(map)); + +#if __arm64__ /* - * Lookup (and create if necessary) the protected memory object - * matching that VM object. 
- * If successful, this also grabs a reference on the memory object, - * to guarantee that it doesn't go away before we get a chance to map - * it. + * "start" and "end" might be 4K-aligned but not 16K-aligned, + * so we might have to loop and establish up to 3 mappings: + * + * + the first 16K-page, which might overlap with the previous + * 4K-aligned mapping, + * + the center, + * + the last 16K-page, which might overlap with the next + * 4K-aligned mapping. + * Each of these mapping might be backed by a vnode pager (if + * properly page-aligned) or a "fourk_pager", itself backed by a + * vnode pager (if 4K-aligned but not page-aligned). */ - protected_mem_obj = apple_protect_pager_setup(protected_object, crypt_info); +#endif /* __arm64__ */ - /* release extra ref on protected object */ - vm_object_deallocate(protected_object); + map_addr = start_aligned; + for (map_addr = start_aligned; + map_addr < end; + map_addr = tmp_entry.vme_end) { + vm_map_lock(map); + map_locked = TRUE; - if (protected_mem_obj == NULL) { - kr = KERN_FAILURE; - goto done; - } + /* lookup the protected VM object */ + if (!vm_map_lookup_entry(map, + map_addr, + &map_entry) || + map_entry->is_sub_map || + VME_OBJECT(map_entry) == VM_OBJECT_NULL || + !(map_entry->protection & VM_PROT_EXECUTE)) { + /* that memory is not properly mapped */ + kr = KERN_INVALID_ARGUMENT; + goto done; + } + + /* get the protected object to be decrypted */ + protected_object = VME_OBJECT(map_entry); + if (protected_object == VM_OBJECT_NULL) { + /* there should be a VM object here at this point */ + kr = KERN_INVALID_ARGUMENT; + goto done; + } + /* ensure protected object stays alive while map is unlocked */ + vm_object_reference(protected_object); + + /* limit the map entry to the area we want to cover */ + vm_map_clip_start(map, map_entry, start_aligned); + vm_map_clip_end(map, map_entry, end_aligned); + + tmp_entry = *map_entry; + map_entry = VM_MAP_ENTRY_NULL; /* not valid after unlocking map */ + vm_map_unlock(map); + map_locked = FALSE; + + /* + * This map entry might be only partially encrypted + * (if not fully "page-aligned"). + */ + crypto_start = 0; + crypto_end = tmp_entry.vme_end - tmp_entry.vme_start; + if (tmp_entry.vme_start < start) { + if (tmp_entry.vme_start != start_aligned) { + kr = KERN_INVALID_ADDRESS; + } + crypto_start += (start - tmp_entry.vme_start); + } + if (tmp_entry.vme_end > end) { + if (tmp_entry.vme_end != end_aligned) { + kr = KERN_INVALID_ADDRESS; + } + crypto_end -= (tmp_entry.vme_end - end); + } + + /* + * This "extra backing offset" is needed to get the decryption + * routine to use the right key. It adjusts for the possibly + * relative offset of an interposed "4K" pager... + */ + if (crypto_backing_offset == (vm_object_offset_t) -1) { + crypto_backing_offset = VME_OFFSET(&tmp_entry); + } + + /* + * Lookup (and create if necessary) the protected memory object + * matching that VM object. + * If successful, this also grabs a reference on the memory object, + * to guarantee that it doesn't go away before we get a chance to map + * it. 
+ */ + unprotected_mem_obj = apple_protect_pager_setup( + protected_object, + VME_OFFSET(&tmp_entry), + crypto_backing_offset, + crypt_info, + crypto_start, + crypto_end); + + /* release extra ref on protected object */ + vm_object_deallocate(protected_object); + + if (unprotected_mem_obj == NULL) { + kr = KERN_FAILURE; + goto done; + } + + vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE; + /* can overwrite an immutable mapping */ + vmk_flags.vmkf_overwrite_immutable = TRUE; +#if __arm64__ + if (tmp_entry.used_for_jit && + (VM_MAP_PAGE_SHIFT(map) != FOURK_PAGE_SHIFT || + PAGE_SHIFT != FOURK_PAGE_SHIFT) && + fourk_binary_compatibility_unsafe && + fourk_binary_compatibility_allow_wx) { + printf("** FOURK_COMPAT [%d]: " + "allowing write+execute at 0x%llx\n", + proc_selfpid(), tmp_entry.vme_start); + vmk_flags.vmkf_map_jit = TRUE; + } +#endif /* __arm64__ */ + + /* map this memory object in place of the current one */ + map_addr = tmp_entry.vme_start; + kr = vm_map_enter_mem_object(map, + &map_addr, + (tmp_entry.vme_end - + tmp_entry.vme_start), + (mach_vm_offset_t) 0, + vm_flags, + vmk_flags, + VM_KERN_MEMORY_NONE, + (ipc_port_t)(uintptr_t) unprotected_mem_obj, + 0, + TRUE, + tmp_entry.protection, + tmp_entry.max_protection, + tmp_entry.inheritance); + assertf(kr == KERN_SUCCESS, + "kr = 0x%x\n", kr); + assertf(map_addr == tmp_entry.vme_start, + "map_addr=0x%llx vme_start=0x%llx tmp_entry=%p\n", + (uint64_t)map_addr, + (uint64_t) tmp_entry.vme_start, + &tmp_entry); + +#if VM_MAP_DEBUG_APPLE_PROTECT + if (vm_map_debug_apple_protect) { + printf("APPLE_PROTECT: map %p [0x%llx:0x%llx] pager %p:" + " backing:[object:%p,offset:0x%llx," + "crypto_backing_offset:0x%llx," + "crypto_start:0x%llx,crypto_end:0x%llx]\n", + map, + (uint64_t) map_addr, + (uint64_t) (map_addr + (tmp_entry.vme_end - + tmp_entry.vme_start)), + unprotected_mem_obj, + protected_object, + VME_OFFSET(&tmp_entry), + crypto_backing_offset, + crypto_start, + crypto_end); + } +#endif /* VM_MAP_DEBUG_APPLE_PROTECT */ + + /* + * Release the reference obtained by + * apple_protect_pager_setup(). + * The mapping (if it succeeded) is now holding a reference on + * the memory object. + */ + memory_object_deallocate(unprotected_mem_obj); + unprotected_mem_obj = MEMORY_OBJECT_NULL; - /* map this memory object in place of the current one */ - map_addr = start; - kr = vm_map_enter_mem_object(map, - &map_addr, - end - start, - (mach_vm_offset_t) 0, - VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, - (ipc_port_t) protected_mem_obj, - (map_entry->offset + - (start - map_entry->vme_start)), - TRUE, - map_entry->protection, - map_entry->max_protection, - map_entry->inheritance); - assert(map_addr == start); - /* - * Release the reference obtained by apple_protect_pager_setup(). - * The mapping (if it succeeded) is now holding a reference on the - * memory object. 
- */ - memory_object_deallocate(protected_mem_obj); + /* continue with next map entry */ + crypto_backing_offset += (tmp_entry.vme_end - + tmp_entry.vme_start); + crypto_backing_offset -= crypto_start; + } + kr = KERN_SUCCESS; done: if (map_locked) { - vm_map_unlock_read(map); + vm_map_unlock(map); } return kr; } -#endif /* CONFIG_CODE_DECRYPTION */ +#endif /* CONFIG_CODE_DECRYPTION */ -lck_grp_t vm_map_lck_grp; -lck_grp_attr_t vm_map_lck_grp_attr; -lck_attr_t vm_map_lck_attr; +lck_grp_t vm_map_lck_grp; +lck_grp_attr_t vm_map_lck_grp_attr; +lck_attr_t vm_map_lck_attr; +lck_attr_t vm_map_lck_rw_attr; +#if CONFIG_EMBEDDED +int malloc_no_cow = 1; +#define VM_PROTECT_WX_FAIL 0 +#else /* CONFIG_EMBEDDED */ +int malloc_no_cow = 0; +#define VM_PROTECT_WX_FAIL 1 +#endif /* CONFIG_EMBEDDED */ +uint64_t vm_memory_malloc_no_cow_mask = 0ULL; +#if DEBUG +int vm_check_map_sanity = 0; +#endif /* * vm_map_init: @@ -603,7 +884,7 @@ lck_attr_t vm_map_lck_attr; * * vm_map_zone: used to allocate maps. * vm_map_entry_zone: used to allocate map entries. - * vm_map_kentry_zone: used to allocate map entries for the kernel. + * vm_map_entry_reserved_zone: fallback zone for kernel map entries * * The kernel allocates map entries from a special zone that is initially * "crammed" with memory. It would be difficult (perhaps impossible) for @@ -615,65 +896,225 @@ void vm_map_init( void) { - vm_map_zone = zinit((vm_map_size_t) sizeof(struct _vm_map), 40*1024, - PAGE_SIZE, "maps"); - zone_change(vm_map_zone, Z_NOENCRYPT, TRUE); + vm_size_t entry_zone_alloc_size; + const char *mez_name = "VM map entries"; + vm_map_zone = zinit((vm_map_size_t) sizeof(struct _vm_map), 40 * 1024, + PAGE_SIZE, "maps"); + zone_change(vm_map_zone, Z_NOENCRYPT, TRUE); +#if defined(__LP64__) + entry_zone_alloc_size = PAGE_SIZE * 5; +#else + entry_zone_alloc_size = PAGE_SIZE * 6; +#endif vm_map_entry_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry), - 1024*1024, PAGE_SIZE*5, - "non-kernel map entries"); + 1024 * 1024, entry_zone_alloc_size, + mez_name); zone_change(vm_map_entry_zone, Z_NOENCRYPT, TRUE); + zone_change(vm_map_entry_zone, Z_NOCALLOUT, TRUE); + zone_change(vm_map_entry_zone, Z_GZALLOC_EXEMPT, TRUE); - vm_map_kentry_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry), - kentry_data_size, kentry_data_size, - "kernel map entries"); - zone_change(vm_map_kentry_zone, Z_NOENCRYPT, TRUE); + vm_map_entry_reserved_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry), + kentry_data_size * 64, kentry_data_size, + "Reserved VM map entries"); + zone_change(vm_map_entry_reserved_zone, Z_NOENCRYPT, TRUE); + /* Don't quarantine because we always need elements available */ + zone_change(vm_map_entry_reserved_zone, Z_KASAN_QUARANTINE, FALSE); vm_map_copy_zone = zinit((vm_map_size_t) sizeof(struct vm_map_copy), - 16*1024, PAGE_SIZE, "map copies"); + 16 * 1024, PAGE_SIZE, "VM map copies"); zone_change(vm_map_copy_zone, Z_NOENCRYPT, TRUE); + vm_map_holes_zone = zinit((vm_map_size_t) sizeof(struct vm_map_links), + 16 * 1024, PAGE_SIZE, "VM map holes"); + zone_change(vm_map_holes_zone, Z_NOENCRYPT, TRUE); + /* * Cram the map and kentry zones with initial data. - * Set kentry_zone non-collectible to aid zone_gc(). + * Set reserved_zone non-collectible to aid zone_gc(). 
*/ zone_change(vm_map_zone, Z_COLLECT, FALSE); - zone_change(vm_map_kentry_zone, Z_COLLECT, FALSE); - zone_change(vm_map_kentry_zone, Z_EXPAND, FALSE); - zone_change(vm_map_kentry_zone, Z_FOREIGN, TRUE); - zone_change(vm_map_kentry_zone, Z_CALLERACCT, FALSE); /* don't charge caller */ + zone_change(vm_map_zone, Z_FOREIGN, TRUE); + zone_change(vm_map_zone, Z_GZALLOC_EXEMPT, TRUE); + + zone_change(vm_map_entry_reserved_zone, Z_COLLECT, FALSE); + zone_change(vm_map_entry_reserved_zone, Z_EXPAND, FALSE); + zone_change(vm_map_entry_reserved_zone, Z_FOREIGN, TRUE); + zone_change(vm_map_entry_reserved_zone, Z_NOCALLOUT, TRUE); + zone_change(vm_map_entry_reserved_zone, Z_CALLERACCT, FALSE); /* don't charge caller */ zone_change(vm_map_copy_zone, Z_CALLERACCT, FALSE); /* don't charge caller */ + zone_change(vm_map_entry_reserved_zone, Z_GZALLOC_EXEMPT, TRUE); + + zone_change(vm_map_holes_zone, Z_COLLECT, TRUE); + zone_change(vm_map_holes_zone, Z_EXPAND, TRUE); + zone_change(vm_map_holes_zone, Z_FOREIGN, TRUE); + zone_change(vm_map_holes_zone, Z_NOCALLOUT, TRUE); + zone_change(vm_map_holes_zone, Z_CALLERACCT, TRUE); + zone_change(vm_map_holes_zone, Z_GZALLOC_EXEMPT, TRUE); + + /* + * Add the stolen memory to zones, adjust zone size and stolen counts. + * zcram only up to the maximum number of pages for each zone chunk. + */ + zcram(vm_map_zone, (vm_offset_t)map_data, map_data_size); + + const vm_size_t stride = ZONE_CHUNK_MAXPAGES * PAGE_SIZE; + for (vm_offset_t off = 0; off < kentry_data_size; off += stride) { + zcram(vm_map_entry_reserved_zone, + (vm_offset_t)kentry_data + off, + MIN(kentry_data_size - off, stride)); + } + for (vm_offset_t off = 0; off < map_holes_data_size; off += stride) { + zcram(vm_map_holes_zone, + (vm_offset_t)map_holes_data + off, + MIN(map_holes_data_size - off, stride)); + } + + /* + * Since these are covered by zones, remove them from stolen page accounting. 
+ */ + VM_PAGE_MOVE_STOLEN(atop_64(map_data_size) + atop_64(kentry_data_size) + atop_64(map_holes_data_size)); - zcram(vm_map_zone, map_data, map_data_size); - zcram(vm_map_kentry_zone, kentry_data, kentry_data_size); - lck_grp_attr_setdefault(&vm_map_lck_grp_attr); lck_grp_init(&vm_map_lck_grp, "vm_map", &vm_map_lck_grp_attr); - lck_attr_setdefault(&vm_map_lck_attr); + lck_attr_setdefault(&vm_map_lck_attr); + + lck_attr_setdefault(&vm_map_lck_rw_attr); + lck_attr_cleardebug(&vm_map_lck_rw_attr); + +#if VM_MAP_DEBUG_APPLE_PROTECT + PE_parse_boot_argn("vm_map_debug_apple_protect", + &vm_map_debug_apple_protect, + sizeof(vm_map_debug_apple_protect)); +#endif /* VM_MAP_DEBUG_APPLE_PROTECT */ +#if VM_MAP_DEBUG_APPLE_FOURK + PE_parse_boot_argn("vm_map_debug_fourk", + &vm_map_debug_fourk, + sizeof(vm_map_debug_fourk)); +#endif /* VM_MAP_DEBUG_FOURK */ + PE_parse_boot_argn("vm_map_executable_immutable", + &vm_map_executable_immutable, + sizeof(vm_map_executable_immutable)); + PE_parse_boot_argn("vm_map_executable_immutable_verbose", + &vm_map_executable_immutable_verbose, + sizeof(vm_map_executable_immutable_verbose)); + + PE_parse_boot_argn("malloc_no_cow", + &malloc_no_cow, + sizeof(malloc_no_cow)); + if (malloc_no_cow) { + vm_memory_malloc_no_cow_mask = 0ULL; + vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC; + vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_SMALL; + vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_MEDIUM; + vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_LARGE; +// vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_HUGE; +// vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_REALLOC; + vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_TINY; + vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_LARGE_REUSABLE; + vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_LARGE_REUSED; + vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_NANO; +// vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_TCMALLOC; + PE_parse_boot_argn("vm_memory_malloc_no_cow_mask", + &vm_memory_malloc_no_cow_mask, + sizeof(vm_memory_malloc_no_cow_mask)); + } + +#if DEBUG + PE_parse_boot_argn("vm_check_map_sanity", &vm_check_map_sanity, sizeof(vm_check_map_sanity)); + if (vm_check_map_sanity) { + kprintf("VM sanity checking enabled\n"); + } else { + kprintf("VM sanity checking disabled. Set bootarg vm_check_map_sanity=1 to enable\n"); + } +#endif /* DEBUG */ } void vm_map_steal_memory( void) { + uint32_t kentry_initial_pages; + map_data_size = round_page(10 * sizeof(struct _vm_map)); map_data = pmap_steal_memory(map_data_size); -#if 0 /* - * Limiting worst case: vm_map_kentry_zone needs to map each "available" - * physical page (i.e. that beyond the kernel image and page tables) - * individually; we guess at most one entry per eight pages in the - * real world. This works out to roughly .1 of 1% of physical memory, - * or roughly 1900 entries (64K) for a 64M machine with 4K pages. + * kentry_initial_pages corresponds to the number of kernel map entries + * required during bootstrap until the asynchronous replenishment + * scheme is activated and/or entries are available from the general + * map entry pool. */ +#if defined(__LP64__) + kentry_initial_pages = 10; +#else + kentry_initial_pages = 6; #endif - kentry_count = pmap_free_pages() / 8; +#if CONFIG_GZALLOC + /* If using the guard allocator, reserve more memory for the kernel + * reserved map entry pool. 
+ */ + if (gzalloc_enabled()) { + kentry_initial_pages *= 1024; + } +#endif - kentry_data_size = - round_page(kentry_count * sizeof(struct vm_map_entry)); + kentry_data_size = kentry_initial_pages * PAGE_SIZE; kentry_data = pmap_steal_memory(kentry_data_size); + + map_holes_data_size = kentry_data_size; + map_holes_data = pmap_steal_memory(map_holes_data_size); +} + +boolean_t vm_map_supports_hole_optimization = FALSE; + +void +vm_kernel_reserved_entry_init(void) +{ + zone_prio_refill_configure(vm_map_entry_reserved_zone, (6 * PAGE_SIZE) / sizeof(struct vm_map_entry)); + + /* + * Once we have our replenish thread set up, we can start using the vm_map_holes zone. + */ + zone_prio_refill_configure(vm_map_holes_zone, (6 * PAGE_SIZE) / sizeof(struct vm_map_links)); + vm_map_supports_hole_optimization = TRUE; +} + +void +vm_map_disable_hole_optimization(vm_map_t map) +{ + vm_map_entry_t head_entry, hole_entry, next_hole_entry; + + if (map->holelistenabled) { + head_entry = hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list); + + while (hole_entry != NULL) { + next_hole_entry = hole_entry->vme_next; + + hole_entry->vme_next = NULL; + hole_entry->vme_prev = NULL; + zfree(vm_map_holes_zone, hole_entry); + + if (next_hole_entry == head_entry) { + hole_entry = NULL; + } else { + hole_entry = next_hole_entry; + } + } + + map->holes_list = NULL; + map->holelistenabled = FALSE; + + map->first_free = vm_map_first_entry(map); + SAVE_HINT_HOLE_WRITE(map, NULL); + } +} + +boolean_t +vm_kernel_map_is_kernel(vm_map_t map) +{ + return map->pmap == kernel_pmap; } /* @@ -683,57 +1124,113 @@ vm_map_steal_memory( * the given physical map structure, and having * the given lower and upper address bounds. */ + vm_map_t vm_map_create( - pmap_t pmap, - vm_map_offset_t min, - vm_map_offset_t max, - boolean_t pageable) + pmap_t pmap, + vm_map_offset_t min, + vm_map_offset_t max, + boolean_t pageable) { - static int color_seed = 0; - register vm_map_t result; + int options; + + options = 0; + if (pageable) { + options |= VM_MAP_CREATE_PAGEABLE; + } + return vm_map_create_options(pmap, min, max, options); +} + +vm_map_t +vm_map_create_options( + pmap_t pmap, + vm_map_offset_t min, + vm_map_offset_t max, + int options) +{ + vm_map_t result; + struct vm_map_links *hole_entry = NULL; + + if (options & ~(VM_MAP_CREATE_ALL_OPTIONS)) { + /* unknown option */ + return VM_MAP_NULL; + } result = (vm_map_t) zalloc(vm_map_zone); - if (result == VM_MAP_NULL) + if (result == VM_MAP_NULL) { panic("vm_map_create"); + } vm_map_first_entry(result) = vm_map_to_entry(result); vm_map_last_entry(result) = vm_map_to_entry(result); result->hdr.nentries = 0; - result->hdr.entries_pageable = pageable; + if (options & VM_MAP_CREATE_PAGEABLE) { + result->hdr.entries_pageable = TRUE; + } else { + result->hdr.entries_pageable = FALSE; + } + + vm_map_store_init( &(result->hdr)); + + result->hdr.page_shift = PAGE_SHIFT; - vm_map_store_init( &(result->hdr) ); - result->size = 0; - result->user_wire_limit = MACH_VM_MAX_ADDRESS; /* default limit is unlimited */ + result->user_wire_limit = MACH_VM_MAX_ADDRESS; /* default limit is unlimited */ result->user_wire_size = 0; - result->ref_count = 1; -#if TASK_SWAPPER +#if !CONFIG_EMBEDDED + result->vmmap_high_start = 0; +#endif + os_ref_init_count(&result->map_refcnt, &map_refgrp, 1); +#if TASK_SWAPPER result->res_count = 1; result->sw_state = MAP_SW_IN; -#endif /* TASK_SWAPPER */ +#endif /* TASK_SWAPPER */ result->pmap = pmap; result->min_offset = min; result->max_offset = max; result->wiring_required = FALSE; 
result->no_zero_fill = FALSE; - result->mapped = FALSE; + result->mapped_in_other_pmaps = FALSE; result->wait_for_space = FALSE; result->switch_protect = FALSE; result->disable_vmentry_reuse = FALSE; result->map_disallow_data_exec = FALSE; + result->is_nested_map = FALSE; + result->map_disallow_new_exec = FALSE; + result->terminated = FALSE; result->highest_entry_end = 0; result->first_free = vm_map_to_entry(result); result->hint = vm_map_to_entry(result); - result->color_rr = (color_seed++) & vm_color_mask; - result->jit_entry_exists = FALSE; -#if CONFIG_FREEZE - result->default_freezer_toc = NULL; + result->jit_entry_exists = FALSE; + + /* "has_corpse_footprint" and "holelistenabled" are mutually exclusive */ + if (options & VM_MAP_CREATE_CORPSE_FOOTPRINT) { + result->has_corpse_footprint = TRUE; + result->holelistenabled = FALSE; + result->vmmap_corpse_footprint = NULL; + } else { + result->has_corpse_footprint = FALSE; + if (vm_map_supports_hole_optimization) { + hole_entry = zalloc(vm_map_holes_zone); + + hole_entry->start = min; +#if defined(__arm__) || defined(__arm64__) + hole_entry->end = result->max_offset; +#else + hole_entry->end = (max > (vm_map_offset_t)MACH_VM_MAX_ADDRESS) ? max : (vm_map_offset_t)MACH_VM_MAX_ADDRESS; #endif + result->holes_list = result->hole_hint = hole_entry; + hole_entry->prev = hole_entry->next = CAST_TO_VM_MAP_ENTRY(hole_entry); + result->holelistenabled = TRUE; + } else { + result->holelistenabled = FALSE; + } + } + vm_map_lock_init(result); lck_mtx_init_ext(&result->s_lock, &result->s_lock_ext, &vm_map_lck_grp, &vm_map_lck_attr); - - return(result); + + return result; } /* @@ -742,30 +1239,49 @@ vm_map_create( * Allocates a VM map entry for insertion in the * given map (or map copy). No fields are filled. */ -#define vm_map_entry_create(map) \ - _vm_map_entry_create(&(map)->hdr) +#define vm_map_entry_create(map, map_locked) _vm_map_entry_create(&(map)->hdr, map_locked) -#define vm_map_copy_entry_create(copy) \ - _vm_map_entry_create(&(copy)->cpy_hdr) +#define vm_map_copy_entry_create(copy, map_locked) \ + _vm_map_entry_create(&(copy)->cpy_hdr, map_locked) +unsigned reserved_zalloc_count, nonreserved_zalloc_count; static vm_map_entry_t _vm_map_entry_create( - register struct vm_map_header *map_header) + struct vm_map_header *map_header, boolean_t __unused map_locked) { - register zone_t zone; - register vm_map_entry_t entry; + zone_t zone; + vm_map_entry_t entry; - if (map_header->entries_pageable) - zone = vm_map_entry_zone; - else - zone = vm_map_kentry_zone; + zone = vm_map_entry_zone; + + assert(map_header->entries_pageable ? 
!map_locked : TRUE); + + if (map_header->entries_pageable) { + entry = (vm_map_entry_t) zalloc(zone); + } else { + entry = (vm_map_entry_t) zalloc_canblock(zone, FALSE); + + if (entry == VM_MAP_ENTRY_NULL) { + zone = vm_map_entry_reserved_zone; + entry = (vm_map_entry_t) zalloc(zone); + OSAddAtomic(1, &reserved_zalloc_count); + } else { + OSAddAtomic(1, &nonreserved_zalloc_count); + } + } - entry = (vm_map_entry_t) zalloc(zone); - if (entry == VM_MAP_ENTRY_NULL) + if (entry == VM_MAP_ENTRY_NULL) { panic("vm_map_entry_create"); - vm_map_store_update( (vm_map_t) NULL, entry, VM_MAP_ENTRY_CREATE); + } + entry->from_reserved_zone = (zone == vm_map_entry_reserved_zone); - return(entry); + vm_map_store_update((vm_map_t) NULL, entry, VM_MAP_ENTRY_CREATE); +#if MAP_ENTRY_CREATION_DEBUG + entry->vme_creation_maphdr = map_header; + backtrace(&entry->vme_creation_bt[0], + (sizeof(entry->vme_creation_bt) / sizeof(uintptr_t)), NULL); +#endif + return entry; } /* @@ -773,28 +1289,36 @@ _vm_map_entry_create( * * Inverse of vm_map_entry_create. * - * write map lock held so no need to + * write map lock held so no need to * do anything special to insure correctness - * of the stores + * of the stores */ -#define vm_map_entry_dispose(map, entry) \ - vm_map_store_update( map, entry, VM_MAP_ENTRY_DELETE); \ +#define vm_map_entry_dispose(map, entry) \ _vm_map_entry_dispose(&(map)->hdr, (entry)) -#define vm_map_copy_entry_dispose(map, entry) \ +#define vm_map_copy_entry_dispose(map, entry) \ _vm_map_entry_dispose(&(copy)->cpy_hdr, (entry)) static void _vm_map_entry_dispose( - register struct vm_map_header *map_header, - register vm_map_entry_t entry) + struct vm_map_header *map_header, + vm_map_entry_t entry) { - register zone_t zone; + zone_t zone; - if (map_header->entries_pageable) + if (map_header->entries_pageable || !(entry->from_reserved_zone)) { zone = vm_map_entry_zone; - else - zone = vm_map_kentry_zone; + } else { + zone = vm_map_entry_reserved_zone; + } + + if (!map_header->entries_pageable) { + if (zone == vm_map_entry_zone) { + OSAddAtomic(-1, &nonreserved_zalloc_count); + } else { + OSAddAtomic(-1, &reserved_zalloc_count); + } + } zfree(zone, entry); } @@ -803,23 +1327,24 @@ _vm_map_entry_dispose( static boolean_t first_free_check = FALSE; boolean_t first_free_is_valid( - vm_map_t map) + vm_map_t map) { - if (!first_free_check) + if (!first_free_check) { return TRUE; - - return( first_free_is_valid_store( map )); + } + + return first_free_is_valid_store( map ); } #endif /* MACH_ASSERT */ -#define vm_map_copy_entry_link(copy, after_where, entry) \ +#define vm_map_copy_entry_link(copy, after_where, entry) \ _vm_map_store_entry_link(&(copy)->cpy_hdr, after_where, (entry)) -#define vm_map_copy_entry_unlink(copy, entry) \ +#define vm_map_copy_entry_unlink(copy, entry) \ _vm_map_store_entry_unlink(&(copy)->cpy_hdr, (entry)) -#if MACH_ASSERT && TASK_SWAPPER +#if MACH_ASSERT && TASK_SWAPPER /* * vm_map_res_reference: * @@ -829,11 +1354,12 @@ first_free_is_valid( * vm_map_swapin. 
* */ -void vm_map_res_reference(register vm_map_t map) +void +vm_map_res_reference(vm_map_t map) { /* assert map is locked */ assert(map->res_count >= 0); - assert(map->ref_count >= map->res_count); + assert(os_ref_get_count(&map->map_refcnt) >= map->res_count); if (map->res_count == 0) { lck_mtx_unlock(&map->s_lock); vm_map_lock(map); @@ -841,8 +1367,9 @@ void vm_map_res_reference(register vm_map_t map) lck_mtx_lock(&map->s_lock); ++map->res_count; vm_map_unlock(map); - } else + } else { ++map->res_count; + } } /* @@ -853,13 +1380,14 @@ void vm_map_res_reference(register vm_map_t map) * The map may not be in memory (i.e. zero residence count). * */ -void vm_map_reference_swap(register vm_map_t map) +void +vm_map_reference_swap(vm_map_t map) { assert(map != VM_MAP_NULL); lck_mtx_lock(&map->s_lock); assert(map->res_count >= 0); - assert(map->ref_count >= map->res_count); - map->ref_count++; + assert(os_ref_get_count(&map->map_refcnt) >= map->res_count); + os_ref_retain_locked(&map->map_refcnt); vm_map_res_reference(map); lck_mtx_unlock(&map->s_lock); } @@ -874,7 +1402,8 @@ void vm_map_reference_swap(register vm_map_t map) * The map is locked, so this function is callable from vm_map_deallocate. * */ -void vm_map_res_deallocate(register vm_map_t map) +void +vm_map_res_deallocate(vm_map_t map) { assert(map->res_count > 0); if (--map->res_count == 0) { @@ -884,9 +1413,9 @@ void vm_map_res_deallocate(register vm_map_t map) vm_map_unlock(map); lck_mtx_lock(&map->s_lock); } - assert(map->ref_count >= map->res_count); + assert(os_ref_get_count(&map->map_refcnt) >= map->res_count); } -#endif /* MACH_ASSERT && TASK_SWAPPER */ +#endif /* MACH_ASSERT && TASK_SWAPPER */ /* * vm_map_destroy: @@ -895,43 +1424,106 @@ void vm_map_res_deallocate(register vm_map_t map) */ void vm_map_destroy( - vm_map_t map, - int flags) -{ + vm_map_t map, + int flags) +{ vm_map_lock(map); + /* final cleanup: no need to unnest shared region */ + flags |= VM_MAP_REMOVE_NO_UNNESTING; + /* final cleanup: ok to remove immutable mappings */ + flags |= VM_MAP_REMOVE_IMMUTABLE; + /* final cleanup: allow gaps in range */ + flags |= VM_MAP_REMOVE_GAPS_OK; + /* clean up regular map entries */ (void) vm_map_delete(map, map->min_offset, map->max_offset, - flags, VM_MAP_NULL); + flags, VM_MAP_NULL); /* clean up leftover special mappings (commpage, etc...) */ +#if !defined(__arm__) && !defined(__arm64__) (void) vm_map_delete(map, 0x0, 0xFFFFFFFFFFFFF000ULL, - flags, VM_MAP_NULL); + flags, VM_MAP_NULL); +#endif /* !__arm__ && !__arm64__ */ + + vm_map_disable_hole_optimization(map); + vm_map_corpse_footprint_destroy(map); -#if CONFIG_FREEZE - if (map->default_freezer_toc){ - default_freezer_mapping_free( &(map->default_freezer_toc), TRUE); - } -#endif vm_map_unlock(map); assert(map->hdr.nentries == 0); - - if(map->pmap) + + if (map->pmap) { pmap_destroy(map->pmap); + } + + if (vm_map_lck_attr.lck_attr_val & LCK_ATTR_DEBUG) { + /* + * If lock debugging is enabled the mutexes get tagged as LCK_MTX_TAG_INDIRECT. + * And this is regardless of whether the lck_mtx_ext_t is embedded in the + * structure or kalloc'ed via lck_mtx_init. + * An example is s_lock_ext within struct _vm_map. + * + * A lck_mtx_destroy on such a mutex will attempt a kfree and panic. We + * can add another tag to detect embedded vs alloc'ed indirect external + * mutexes but that'll be additional checks in the lock path and require + * updating dependencies for the old vs new tag. 
+ * + * Since the kfree() is for LCK_MTX_TAG_INDIRECT mutexes and that tag is applied + * just when lock debugging is ON, we choose to forego explicitly destroying + * the vm_map mutex and rw lock and, as a consequence, will overflow the reference + * count on vm_map_lck_grp, which has no serious side-effect. + */ + } else { + lck_rw_destroy(&(map)->lock, &vm_map_lck_grp); + lck_mtx_destroy(&(map)->s_lock, &vm_map_lck_grp); + } zfree(vm_map_zone, map); } -#if TASK_SWAPPER +/* + * Returns pid of the task with the largest number of VM map entries. + * Used in the zone-map-exhaustion jetsam path. + */ +pid_t +find_largest_process_vm_map_entries(void) +{ + pid_t victim_pid = -1; + int max_vm_map_entries = 0; + task_t task = TASK_NULL; + queue_head_t *task_list = &tasks; + + lck_mtx_lock(&tasks_threads_lock); + queue_iterate(task_list, task, task_t, tasks) { + if (task == kernel_task || !task->active) { + continue; + } + + vm_map_t task_map = task->map; + if (task_map != VM_MAP_NULL) { + int task_vm_map_entries = task_map->hdr.nentries; + if (task_vm_map_entries > max_vm_map_entries) { + max_vm_map_entries = task_vm_map_entries; + victim_pid = pid_from_task(task); + } + } + } + lck_mtx_unlock(&tasks_threads_lock); + + printf("zone_map_exhaustion: victim pid %d, vm region count: %d\n", victim_pid, max_vm_map_entries); + return victim_pid; +} + +#if TASK_SWAPPER /* * vm_map_swapin/vm_map_swapout * - * Swap a map in and out, either referencing or releasing its resources. + * Swap a map in and out, either referencing or releasing its resources. * These functions are internal use only; however, they must be exported * because they may be called from macros, which are exported. * - * In the case of swapout, there could be races on the residence count, - * so if the residence count is up, we return, assuming that a + * In the case of swapout, there could be races on the residence count, + * so if the residence count is up, we return, assuming that a * vm_map_deallocate() call in the near future will bring us back. * * Locking: @@ -947,7 +1539,7 @@ vm_map_destroy( * 2) A vm_map_reference() call at this time is illegal, and will * cause a panic. vm_map_reference() is only allowed on resident * maps, since it refuses to block. - * 3) A vm_map_swapin() call during a swapin will block, and + * 3) A vm_map_swapin() call during a swapin will block, and * proceeed when the first swapin is done, turning into a nop. * This is the reason the res_count is not incremented until * after the swapin is complete. @@ -955,9 +1547,9 @@ vm_map_destroy( * the map lock is taken, during which a swapin may get the lock * before a swapout about to happen. If this happens, the swapin * will detect the state and increment the reference count, causing - * the swapout to be a nop, thereby delaying it until a later - * vm_map_deallocate. If the swapout gets the lock first, then - * the swapin will simply block until the swapout is done, and + * the swapout to be a nop, thereby delaying it until a later + * vm_map_deallocate. If the swapout gets the lock first, then + * the swapin will simply block until the swapout is done, and * then proceed. 
* * Because vm_map_swapin() is potentially an expensive operation, it @@ -972,23 +1564,26 @@ vm_map_destroy( int vm_map_swap_enable = 1; -void vm_map_swapin (vm_map_t map) +void +vm_map_swapin(vm_map_t map) { - register vm_map_entry_t entry; + vm_map_entry_t entry; - if (!vm_map_swap_enable) /* debug */ + if (!vm_map_swap_enable) { /* debug */ return; + } /* * Map is locked * First deal with various races. */ - if (map->sw_state == MAP_SW_IN) - /* + if (map->sw_state == MAP_SW_IN) { + /* * we raced with swapout and won. Returning will incr. * the res_count, turning the swapout into a nop. */ return; + } /* * The residence count must be zero. If we raced with another @@ -1006,7 +1601,7 @@ void vm_map_swapin (vm_map_t map) assert(map->sw_state == MAP_SW_OUT); /* - * We now operate upon each map entry. If the entry is a sub- + * We now operate upon each map entry. If the entry is a sub- * or share-map, we call vm_map_res_reference upon it. * If the entry is an object, we call vm_object_res_reference * (this may iterate through the shadow chain). @@ -1017,14 +1612,14 @@ void vm_map_swapin (vm_map_t map) entry = vm_map_first_entry(map); while (entry != vm_map_to_entry(map)) { - if (entry->object.vm_object != VM_OBJECT_NULL) { + if (VME_OBJECT(entry) != VM_OBJECT_NULL) { if (entry->is_sub_map) { - vm_map_t lmap = entry->object.sub_map; + vm_map_t lmap = VME_SUBMAP(entry); lck_mtx_lock(&lmap->s_lock); vm_map_res_reference(lmap); lck_mtx_unlock(&lmap->s_lock); } else { - vm_object_t object = entry->object.vm_object; + vm_object_t object = VME_OBEJCT(entry); vm_object_lock(object); /* * This call may iterate through the @@ -1040,10 +1635,11 @@ void vm_map_swapin (vm_map_t map) map->sw_state = MAP_SW_IN; } -void vm_map_swapout(vm_map_t map) +void +vm_map_swapout(vm_map_t map) { - register vm_map_entry_t entry; - + vm_map_entry_t entry; + /* * Map is locked * First deal with various races. @@ -1063,11 +1659,12 @@ void vm_map_swapout(vm_map_t map) */ assert(map->sw_state == MAP_SW_IN); - if (!vm_map_swap_enable) + if (!vm_map_swap_enable) { return; + } /* - * We now operate upon each map entry. If the entry is a sub- + * We now operate upon each map entry. If the entry is a sub- * or share-map, we call vm_map_res_deallocate upon it. * If the entry is an object, we call vm_object_res_deallocate * (this may iterate through the shadow chain). @@ -1078,19 +1675,19 @@ void vm_map_swapout(vm_map_t map) entry = vm_map_first_entry(map); while (entry != vm_map_to_entry(map)) { - if (entry->object.vm_object != VM_OBJECT_NULL) { + if (VME_OBJECT(entry) != VM_OBJECT_NULL) { if (entry->is_sub_map) { - vm_map_t lmap = entry->object.sub_map; + vm_map_t lmap = VME_SUBMAP(entry); lck_mtx_lock(&lmap->s_lock); vm_map_res_deallocate(lmap); lck_mtx_unlock(&lmap->s_lock); } else { - vm_object_t object = entry->object.vm_object; + vm_object_t object = VME_OBJECT(entry); vm_object_lock(object); /* - * This call may take a long time, - * since it could actively push - * out pages (if we implement it + * This call may take a long time, + * since it could actively push + * out pages (if we implement it * that way). 
*/ vm_object_res_deallocate(object); @@ -1103,13 +1700,13 @@ void vm_map_swapout(vm_map_t map) map->sw_state = MAP_SW_OUT; } -#endif /* TASK_SWAPPER */ +#endif /* TASK_SWAPPER */ /* * vm_map_lookup_entry: [ internal use only ] * - * Calls into the vm map store layer to find the map - * entry containing (or immediately preceding) the + * Calls into the vm map store layer to find the map + * entry containing (or immediately preceding) the * specified address in the given map; the entry is returned * in the "entry" parameter. The boolean * result indicates whether the address is @@ -1117,11 +1714,11 @@ void vm_map_swapout(vm_map_t map) */ boolean_t vm_map_lookup_entry( - register vm_map_t map, - register vm_map_offset_t address, - vm_map_entry_t *entry) /* OUT */ + vm_map_t map, + vm_map_offset_t address, + vm_map_entry_t *entry) /* OUT */ { - return ( vm_map_store_lookup_entry( map, address, entry )); + return vm_map_store_lookup_entry( map, address, entry ); } /* @@ -1139,28 +1736,31 @@ vm_map_lookup_entry( */ kern_return_t vm_map_find_space( - register vm_map_t map, - vm_map_offset_t *address, /* OUT */ - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_map_entry_t *o_entry) /* OUT */ + vm_map_t map, + vm_map_offset_t *address, /* OUT */ + vm_map_size_t size, + vm_map_offset_t mask, + int flags __unused, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + vm_map_entry_t *o_entry) /* OUT */ { - register vm_map_entry_t entry, new_entry; - register vm_map_offset_t start; - register vm_map_offset_t end; + vm_map_entry_t entry, new_entry; + vm_map_offset_t start; + vm_map_offset_t end; + vm_map_entry_t hole_entry; if (size == 0) { *address = 0; return KERN_INVALID_ARGUMENT; } - if (flags & VM_FLAGS_GUARD_AFTER) { + if (vmk_flags.vmkf_guard_after) { /* account for the back guard page in the size */ - size += PAGE_SIZE_64; + size += VM_MAP_PAGE_SIZE(map); } - new_entry = vm_map_entry_create(map); + new_entry = vm_map_entry_create(map, FALSE); /* * Look for the first possible address; if there's already @@ -1169,14 +1769,31 @@ vm_map_find_space( vm_map_lock(map); - if( map->disable_vmentry_reuse == TRUE) { + if (map->disable_vmentry_reuse == TRUE) { VM_MAP_HIGHEST_ENTRY(map, entry, start); } else { - assert(first_free_is_valid(map)); - if ((entry = map->first_free) == vm_map_to_entry(map)) - start = map->min_offset; - else - start = entry->vme_end; + if (map->holelistenabled) { + hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list); + + if (hole_entry == NULL) { + /* + * No more space in the map? + */ + vm_map_entry_dispose(map, new_entry); + vm_map_unlock(map); + return KERN_NO_SPACE; + } + + entry = hole_entry; + start = entry->vme_start; + } else { + assert(first_free_is_valid(map)); + if ((entry = map->first_free) == vm_map_to_entry(map)) { + start = map->min_offset; + } else { + start = entry->vme_end; + } + } } /* @@ -1185,7 +1802,7 @@ vm_map_find_space( */ while (TRUE) { - register vm_map_entry_t next; + vm_map_entry_t next; /* * Find the end of the proposed new region. @@ -1193,48 +1810,78 @@ vm_map_find_space( * wrap around the address. 
*/ - if (flags & VM_FLAGS_GUARD_BEFORE) { + if (vmk_flags.vmkf_guard_before) { /* reserve space for the front guard page */ - start += PAGE_SIZE_64; + start += VM_MAP_PAGE_SIZE(map); } end = ((start + mask) & ~mask); - + if (end < start) { vm_map_entry_dispose(map, new_entry); vm_map_unlock(map); - return(KERN_NO_SPACE); + return KERN_NO_SPACE; } start = end; + assert(VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map))); end += size; + assert(VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map))); if ((end > map->max_offset) || (end < start)) { vm_map_entry_dispose(map, new_entry); vm_map_unlock(map); - return(KERN_NO_SPACE); + return KERN_NO_SPACE; } - /* - * If there are no more entries, we must win. - */ - next = entry->vme_next; - if (next == vm_map_to_entry(map)) - break; - /* - * If there is another entry, it must be - * after the end of the potential new region. - */ + if (map->holelistenabled) { + if (entry->vme_end >= end) { + break; + } + } else { + /* + * If there are no more entries, we must win. + * + * OR + * + * If there is another entry, it must be + * after the end of the potential new region. + */ - if (next->vme_start >= end) - break; + if (next == vm_map_to_entry(map)) { + break; + } + + if (next->vme_start >= end) { + break; + } + } /* * Didn't fit -- move to the next entry. */ entry = next; - start = entry->vme_end; + + if (map->holelistenabled) { + if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) { + /* + * Wrapped around + */ + vm_map_entry_dispose(map, new_entry); + vm_map_unlock(map); + return KERN_NO_SPACE; + } + start = entry->vme_start; + } else { + start = entry->vme_end; + } + } + + if (map->holelistenabled) { + if (vm_map_lookup_entry(map, entry->vme_start, &entry)) { + panic("Found an existing entry (%p) instead of potential hole at address: 0x%llx.\n", entry, (unsigned long long)entry->vme_start); + } } /* @@ -1247,22 +1894,27 @@ vm_map_find_space( * the map should be locked. 
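/*
 * Worked example of the mask arithmetic used in the search loop above, with
 * hypothetical values: the candidate start is rounded up to the
 * caller-requested alignment before the size is added.
 */
vm_map_offset_t	candidate  = 0x10000123ULL;		/* unaligned candidate start */
vm_map_offset_t	align_mask = (64 * 1024) - 1;		/* caller asked for 64KB alignment */
vm_map_offset_t	rounded    = (candidate + align_mask) & ~align_mask;
/* rounded == 0x10010000, the first 64KB boundary at or above the candidate */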
*/ - if (flags & VM_FLAGS_GUARD_BEFORE) { + if (vmk_flags.vmkf_guard_before) { /* go back for the front guard page */ - start -= PAGE_SIZE_64; + start -= VM_MAP_PAGE_SIZE(map); } *address = start; + assert(start < end); new_entry->vme_start = start; new_entry->vme_end = end; assert(page_aligned(new_entry->vme_start)); assert(page_aligned(new_entry->vme_end)); + assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_start, + VM_MAP_PAGE_MASK(map))); + assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_end, + VM_MAP_PAGE_MASK(map))); new_entry->is_shared = FALSE; new_entry->is_sub_map = FALSE; - new_entry->use_pmap = FALSE; - new_entry->object.vm_object = VM_OBJECT_NULL; - new_entry->offset = (vm_object_offset_t) 0; + new_entry->use_pmap = TRUE; + VME_OBJECT_SET(new_entry, VM_OBJECT_NULL); + VME_OFFSET_SET(new_entry, (vm_object_offset_t) 0); new_entry->needs_copy = FALSE; @@ -1277,18 +1929,32 @@ vm_map_find_space( new_entry->needs_wakeup = FALSE; new_entry->no_cache = FALSE; new_entry->permanent = FALSE; - new_entry->superpage_size = 0; + new_entry->superpage_size = FALSE; + if (VM_MAP_PAGE_SHIFT(map) != PAGE_SHIFT) { + new_entry->map_aligned = TRUE; + } else { + new_entry->map_aligned = FALSE; + } - new_entry->alias = 0; + new_entry->used_for_jit = FALSE; + new_entry->pmap_cs_associated = FALSE; new_entry->zero_wired_pages = FALSE; + new_entry->iokit_acct = FALSE; + new_entry->vme_resilient_codesign = FALSE; + new_entry->vme_resilient_media = FALSE; + if (vmk_flags.vmkf_atomic_entry) { + new_entry->vme_atomic = TRUE; + } else { + new_entry->vme_atomic = FALSE; + } - VM_GET_FLAGS_ALIAS(flags, new_entry->alias); + VME_ALIAS_SET(new_entry, tag); /* * Insert the new entry into the list */ - vm_map_store_entry_link(map, entry, new_entry); + vm_map_store_entry_link(map, entry, new_entry, VM_MAP_KERNEL_FLAGS_NONE); map->size += size; @@ -1298,7 +1964,7 @@ vm_map_find_space( SAVE_HINT_MAP_WRITE(map, new_entry); *o_entry = new_entry; - return(KERN_SUCCESS); + return KERN_SUCCESS; } int vm_map_pmap_enter_print = FALSE; @@ -1313,40 +1979,48 @@ int vm_map_pmap_enter_enable = FALSE; * As soon as a page not found in the object the scan ends. * * Returns: - * Nothing. + * Nothing. * * In/out conditions: * The source map should not be locked on entry. */ -static void +__unused static void vm_map_pmap_enter( - vm_map_t map, - register vm_map_offset_t addr, - register vm_map_offset_t end_addr, - register vm_object_t object, - vm_object_offset_t offset, - vm_prot_t protection) + vm_map_t map, + vm_map_offset_t addr, + vm_map_offset_t end_addr, + vm_object_t object, + vm_object_offset_t offset, + vm_prot_t protection) { - int type_of_fault; - kern_return_t kr; + int type_of_fault; + kern_return_t kr; + struct vm_object_fault_info fault_info = {}; - if(map->pmap == 0) + if (map->pmap == 0) { return; + } while (addr < end_addr) { - register vm_page_t m; + vm_page_t m; - vm_object_lock(object); - m = vm_page_lookup(object, offset); /* - * ENCRYPTED SWAP: - * The user should never see encrypted data, so do not - * enter an encrypted page in the page table. + * TODO: + * From vm_map_enter(), we come into this function without the map + * lock held or the object lock held. + * We haven't taken a reference on the object either. + * We should do a proper lookup on the map to make sure + * that things are sane before we go locking objects that + * could have been deallocated from under us. 
*/ - if (m == VM_PAGE_NULL || m->busy || m->encrypted || - m->fictitious || - (m->unusual && ( m->error || m->restart || m->absent))) { + + vm_object_lock(object); + + m = vm_page_lookup(object, offset); + + if (m == VM_PAGE_NULL || m->vmp_busy || m->vmp_fictitious || + (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_absent))) { vm_object_unlock(object); return; } @@ -1354,12 +2028,17 @@ vm_map_pmap_enter( if (vm_map_pmap_enter_print) { printf("vm_map_pmap_enter:"); printf("map: %p, addr: %llx, object: %p, offset: %llx\n", - map, (unsigned long long)addr, object, (unsigned long long)offset); + map, (unsigned long long)addr, object, (unsigned long long)offset); } type_of_fault = DBG_CACHE_HIT_FAULT; - kr = vm_fault_enter(m, map->pmap, addr, protection, protection, - VM_PAGE_WIRED(m), FALSE, FALSE, FALSE, - &type_of_fault); + kr = vm_fault_enter(m, map->pmap, + addr, protection, protection, + VM_PAGE_WIRED(m), + FALSE, /* change_wiring */ + VM_KERN_MEMORY_NONE, /* tag - not wiring */ + &fault_info, + NULL, /* need_retry */ + &type_of_fault); vm_object_unlock(object); @@ -1369,38 +2048,109 @@ vm_map_pmap_enter( } boolean_t vm_map_pmap_is_empty( - vm_map_t map, - vm_map_offset_t start, + vm_map_t map, + vm_map_offset_t start, vm_map_offset_t end); -boolean_t vm_map_pmap_is_empty( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end) +boolean_t +vm_map_pmap_is_empty( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end) { #ifdef MACHINE_PMAP_IS_EMPTY return pmap_is_empty(map->pmap, start, end); -#else /* MACHINE_PMAP_IS_EMPTY */ - vm_map_offset_t offset; - ppnum_t phys_page; +#else /* MACHINE_PMAP_IS_EMPTY */ + vm_map_offset_t offset; + ppnum_t phys_page; if (map->pmap == NULL) { return TRUE; } for (offset = start; - offset < end; - offset += PAGE_SIZE) { + offset < end; + offset += PAGE_SIZE) { phys_page = pmap_find_phys(map->pmap, offset); if (phys_page) { kprintf("vm_map_pmap_is_empty(%p,0x%llx,0x%llx): " - "page %d at 0x%llx\n", - map, (long long)start, (long long)end, - phys_page, (long long)offset); + "page %d at 0x%llx\n", + map, (long long)start, (long long)end, + phys_page, (long long)offset); return FALSE; } } return TRUE; -#endif /* MACHINE_PMAP_IS_EMPTY */ +#endif /* MACHINE_PMAP_IS_EMPTY */ +} + +#define MAX_TRIES_TO_GET_RANDOM_ADDRESS 1000 +kern_return_t +vm_map_random_address_for_size( + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t size) +{ + kern_return_t kr = KERN_SUCCESS; + int tries = 0; + vm_map_offset_t random_addr = 0; + vm_map_offset_t hole_end; + + vm_map_entry_t next_entry = VM_MAP_ENTRY_NULL; + vm_map_entry_t prev_entry = VM_MAP_ENTRY_NULL; + vm_map_size_t vm_hole_size = 0; + vm_map_size_t addr_space_size; + + addr_space_size = vm_map_max(map) - vm_map_min(map); + + assert(page_aligned(size)); + + while (tries < MAX_TRIES_TO_GET_RANDOM_ADDRESS) { + random_addr = ((vm_map_offset_t)random()) << PAGE_SHIFT; + random_addr = vm_map_trunc_page( + vm_map_min(map) + (random_addr % addr_space_size), + VM_MAP_PAGE_MASK(map)); + + if (vm_map_lookup_entry(map, random_addr, &prev_entry) == FALSE) { + if (prev_entry == vm_map_to_entry(map)) { + next_entry = vm_map_first_entry(map); + } else { + next_entry = prev_entry->vme_next; + } + if (next_entry == vm_map_to_entry(map)) { + hole_end = vm_map_max(map); + } else { + hole_end = next_entry->vme_start; + } + vm_hole_size = hole_end - random_addr; + if (vm_hole_size >= size) { + *address = random_addr; + break; + } + } + tries++; + } + + if (tries == MAX_TRIES_TO_GET_RANDOM_ADDRESS) { + kr 
= KERN_NO_SPACE; + } + return kr; +} + +static boolean_t +vm_memory_malloc_no_cow( + int alias) +{ + uint64_t alias_mask; + + if (alias > 63) { + return FALSE; + } + + alias_mask = 1ULL << alias; + if (alias_mask & vm_memory_malloc_no_cow_mask) { + return TRUE; + } + return FALSE; } /* @@ -1418,39 +2168,64 @@ static unsigned int vm_map_enter_restore_successes = 0; static unsigned int vm_map_enter_restore_failures = 0; kern_return_t vm_map_enter( - vm_map_t map, - vm_map_offset_t *address, /* IN/OUT */ - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_object_t object, - vm_object_offset_t offset, - boolean_t needs_copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) -{ - vm_map_entry_t entry, new_entry; - vm_map_offset_t start, tmp_start, tmp_offset; - vm_map_offset_t end, tmp_end; - vm_map_offset_t tmp2_start, tmp2_end; - vm_map_offset_t step; - kern_return_t result = KERN_SUCCESS; - vm_map_t zap_old_map = VM_MAP_NULL; - vm_map_t zap_new_map = VM_MAP_NULL; - boolean_t map_locked = FALSE; - boolean_t pmap_empty = TRUE; - boolean_t new_mapping_established = FALSE; - boolean_t anywhere = ((flags & VM_FLAGS_ANYWHERE) != 0); - boolean_t purgable = ((flags & VM_FLAGS_PURGABLE) != 0); - boolean_t overwrite = ((flags & VM_FLAGS_OVERWRITE) != 0); - boolean_t no_cache = ((flags & VM_FLAGS_NO_CACHE) != 0); - boolean_t is_submap = ((flags & VM_FLAGS_SUBMAP) != 0); - boolean_t permanent = ((flags & VM_FLAGS_PERMANENT) != 0); - unsigned int superpage_size = ((flags & VM_FLAGS_SUPERPAGE_MASK) >> VM_FLAGS_SUPERPAGE_SHIFT); - char alias; - vm_map_offset_t effective_min_offset, effective_max_offset; - kern_return_t kr; + vm_map_t map, + vm_map_offset_t *address, /* IN/OUT */ + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t alias, + vm_object_t object, + vm_object_offset_t offset, + boolean_t needs_copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) +{ + vm_map_entry_t entry, new_entry; + vm_map_offset_t start, tmp_start, tmp_offset; + vm_map_offset_t end, tmp_end; + vm_map_offset_t tmp2_start, tmp2_end; + vm_map_offset_t desired_empty_end; + vm_map_offset_t step; + kern_return_t result = KERN_SUCCESS; + vm_map_t zap_old_map = VM_MAP_NULL; + vm_map_t zap_new_map = VM_MAP_NULL; + boolean_t map_locked = FALSE; + boolean_t pmap_empty = TRUE; + boolean_t new_mapping_established = FALSE; + boolean_t keep_map_locked = vmk_flags.vmkf_keep_map_locked; + boolean_t anywhere = ((flags & VM_FLAGS_ANYWHERE) != 0); + boolean_t purgable = ((flags & VM_FLAGS_PURGABLE) != 0); + boolean_t overwrite = ((flags & VM_FLAGS_OVERWRITE) != 0); + boolean_t no_cache = ((flags & VM_FLAGS_NO_CACHE) != 0); + boolean_t is_submap = vmk_flags.vmkf_submap; + boolean_t permanent = vmk_flags.vmkf_permanent; + boolean_t no_copy_on_read = vmk_flags.vmkf_no_copy_on_read; + boolean_t entry_for_jit = vmk_flags.vmkf_map_jit; + boolean_t iokit_acct = vmk_flags.vmkf_iokit_acct; + boolean_t resilient_codesign = ((flags & VM_FLAGS_RESILIENT_CODESIGN) != 0); + boolean_t resilient_media = ((flags & VM_FLAGS_RESILIENT_MEDIA) != 0); + boolean_t random_address = ((flags & VM_FLAGS_RANDOM_ADDR) != 0); + unsigned int superpage_size = ((flags & VM_FLAGS_SUPERPAGE_MASK) >> VM_FLAGS_SUPERPAGE_SHIFT); + vm_tag_t user_alias; + vm_map_offset_t effective_min_offset, effective_max_offset; + kern_return_t kr; + boolean_t clear_map_aligned = FALSE; + vm_map_entry_t hole_entry; + vm_map_size_t chunk_size = 0; + + 
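/*
 * Illustration of the alias-mask test performed by vm_memory_malloc_no_cow()
 * above; the mask value here is hypothetical (the real mask,
 * vm_memory_malloc_no_cow_mask, is configured elsewhere).
 */
uint64_t	demo_mask   = (1ULL << VM_MEMORY_MALLOC_SMALL) | (1ULL << VM_MEMORY_MALLOC_LARGE);
int		demo_alias  = VM_MEMORY_MALLOC_SMALL;
boolean_t	demo_no_cow = (demo_alias <= 63 &&
    (demo_mask & (1ULL << demo_alias))) ? TRUE : FALSE;
/* demo_no_cow == TRUE: such an allocation gets its own VM object up front
 * instead of relying on copy-on-write (see the "purgable || entry_for_jit ||
 * vm_memory_malloc_no_cow(user_alias)" branch further down). */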
assertf(vmk_flags.__vmkf_unused == 0, "vmk_flags unused=0x%x\n", vmk_flags.__vmkf_unused); + + if (flags & VM_FLAGS_4GB_CHUNK) { +#if defined(__LP64__) + chunk_size = (4ULL * 1024 * 1024 * 1024); /* max. 4GB chunks for the new allocation */ +#else /* __LP64__ */ + chunk_size = ANON_CHUNK_SIZE; +#endif /* __LP64__ */ + } else { + chunk_size = ANON_CHUNK_SIZE; + } if (superpage_size) { switch (superpage_size) { @@ -1462,30 +2237,89 @@ vm_map_enter( * with a lookup of the size depending on superpage_size. */ #ifdef __x86_64__ - case SUPERPAGE_SIZE_ANY: - /* handle it like 2 MB and round up to page size */ - size = (size + 2*1024*1024 - 1) & ~(2*1024*1024 - 1); - case SUPERPAGE_SIZE_2MB: - break; + case SUPERPAGE_SIZE_ANY: + /* handle it like 2 MB and round up to page size */ + size = (size + 2 * 1024 * 1024 - 1) & ~(2 * 1024 * 1024 - 1); + case SUPERPAGE_SIZE_2MB: + break; #endif - default: - return KERN_INVALID_ARGUMENT; + default: + return KERN_INVALID_ARGUMENT; } - mask = SUPERPAGE_SIZE-1; - if (size & (SUPERPAGE_SIZE-1)) + mask = SUPERPAGE_SIZE - 1; + if (size & (SUPERPAGE_SIZE - 1)) { return KERN_INVALID_ARGUMENT; - inheritance = VM_INHERIT_NONE; /* fork() children won't inherit superpages */ + } + inheritance = VM_INHERIT_NONE; /* fork() children won't inherit superpages */ } -#if CONFIG_EMBEDDED - if (cur_protection & VM_PROT_WRITE){ - if ((cur_protection & VM_PROT_EXECUTE) && !(flags & VM_FLAGS_MAP_JIT)){ - printf("EMBEDDED: %s curprot cannot be write+execute. turning off execute\n", __PRETTY_FUNCTION__); - cur_protection &= ~VM_PROT_EXECUTE; + if ((cur_protection & VM_PROT_WRITE) && + (cur_protection & VM_PROT_EXECUTE) && +#if !CONFIG_EMBEDDED + map != kernel_map && + (cs_process_global_enforcement() || + (vmk_flags.vmkf_cs_enforcement_override + ? vmk_flags.vmkf_cs_enforcement + : cs_process_enforcement(NULL))) && +#endif /* !CONFIG_EMBEDDED */ + !entry_for_jit) { + DTRACE_VM3(cs_wx, + uint64_t, 0, + uint64_t, 0, + vm_prot_t, cur_protection); + printf("CODE SIGNING: %d[%s] %s: curprot cannot be write+execute. " +#if VM_PROTECT_WX_FAIL + "failing\n", +#else /* VM_PROTECT_WX_FAIL */ + "turning off execute\n", +#endif /* VM_PROTECT_WX_FAIL */ + proc_selfpid(), + (current_task()->bsd_info + ? proc_name_address(current_task()->bsd_info) + : "?"), + __FUNCTION__); + cur_protection &= ~VM_PROT_EXECUTE; +#if VM_PROTECT_WX_FAIL + return KERN_PROTECTION_FAILURE; +#endif /* VM_PROTECT_WX_FAIL */ + } + + /* + * If the task has requested executable lockdown, + * deny any new executable mapping. + */ + if (map->map_disallow_new_exec == TRUE) { + if (cur_protection & VM_PROT_EXECUTE) { + return KERN_PROTECTION_FAILURE; + } + } + + if (resilient_codesign) { + assert(!is_submap); + if ((cur_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE)) || + (max_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE))) { + return KERN_PROTECTION_FAILURE; + } + } + + if (resilient_media) { + assert(!is_submap); +// assert(!needs_copy); + if (object != VM_OBJECT_NULL && + !object->internal) { + /* + * This mapping is directly backed by an external + * memory manager (e.g. a vnode pager for a file): + * we would not have any safe place to inject + * a zero-filled page if an actual page is not + * available, without possibly impacting the actual + * contents of the mapped object (e.g. the file), + * so we can't provide any media resiliency here. 
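/*
 * Hypothetical caller sketch: VM_FLAGS_RESILIENT_MEDIA is only accepted when
 * the mapping is backed by anonymous (internal) memory or no object at all,
 * per the check above, so that a zero-filled page can stand in for data that
 * cannot be paged in.  The map, size and protections below are illustrative.
 */
vm_map_offset_t	resilient_addr = 0;
kern_return_t	resilient_kr;

resilient_kr = vm_map_enter(map,
	&resilient_addr,
	(vm_map_size_t)(16 * 1024),
	(vm_map_offset_t)0,
	VM_FLAGS_ANYWHERE | VM_FLAGS_RESILIENT_MEDIA,
	VM_MAP_KERNEL_FLAGS_NONE,
	VM_KERN_MEMORY_NONE,
	VM_OBJECT_NULL,		/* anonymous backing: zero-fill substitution is safe */
	0,
	FALSE,			/* needs_copy */
	VM_PROT_READ,
	VM_PROT_READ,
	VM_INHERIT_DEFAULT);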
+ */ + return KERN_INVALID_ARGUMENT; } } -#endif /* CONFIG_EMBEDDED */ if (is_submap) { if (purgable) { @@ -1497,7 +2331,7 @@ vm_map_enter( return KERN_INVALID_ARGUMENT; } } - if (flags & VM_FLAGS_ALREADY) { + if (vmk_flags.vmkf_already) { /* * VM_FLAGS_ALREADY says that it's OK if the same mapping * is already present. For it to be meaningul, the requested @@ -1513,16 +2347,26 @@ vm_map_enter( effective_min_offset = map->min_offset; - if (flags & VM_FLAGS_BEYOND_MAX) { + if (vmk_flags.vmkf_beyond_max) { /* * Allow an insertion beyond the map's max offset. */ - if (vm_map_is_64bit(map)) +#if !defined(__arm__) && !defined(__arm64__) + if (vm_map_is_64bit(map)) { effective_max_offset = 0xFFFFFFFFFFFFF000ULL; - else - effective_max_offset = 0x00000000FFFFF000ULL; + } else +#endif /* __arm__ */ + effective_max_offset = 0x00000000FFFFF000ULL; } else { +#if !defined(CONFIG_EMBEDDED) + if (__improbable(vmk_flags.vmkf_32bit_map_va)) { + effective_max_offset = MIN(map->max_offset, 0x00000000FFFFF000ULL); + } else { + effective_max_offset = map->max_offset; + } +#else effective_max_offset = map->max_offset; +#endif } if (size == 0 || @@ -1531,24 +2375,57 @@ vm_map_enter( return KERN_INVALID_ARGUMENT; } - VM_GET_FLAGS_ALIAS(flags, alias); + if (map->pmap == kernel_pmap) { + user_alias = VM_KERN_MEMORY_NONE; + } else { + user_alias = alias; + } + + if (user_alias == VM_MEMORY_MALLOC_MEDIUM) { + chunk_size = MALLOC_MEDIUM_CHUNK_SIZE; + } -#define RETURN(value) { result = value; goto BailOut; } +#define RETURN(value) { result = value; goto BailOut; } assert(page_aligned(*address)); assert(page_aligned(size)); + if (!VM_MAP_PAGE_ALIGNED(size, VM_MAP_PAGE_MASK(map))) { + /* + * In most cases, the caller rounds the size up to the + * map's page size. + * If we get a size that is explicitly not map-aligned here, + * we'll have to respect the caller's wish and mark the + * mapping as "not map-aligned" to avoid tripping the + * map alignment checks later. + */ + clear_map_aligned = TRUE; + } + if (!anywhere && + !VM_MAP_PAGE_ALIGNED(*address, VM_MAP_PAGE_MASK(map))) { + /* + * We've been asked to map at a fixed address and that + * address is not aligned to the map's specific alignment. + * The caller should know what it's doing (i.e. most likely + * mapping some fragmented copy map, transferring memory from + * a VM map with a different alignment), so clear map_aligned + * for this new VM map entry and proceed. + */ + clear_map_aligned = TRUE; + } + /* * Only zero-fill objects are allowed to be purgable. * LP64todo - limit purgable objects to 32-bits for now */ if (purgable && (offset != 0 || - (object != VM_OBJECT_NULL && - (object->vo_size != size || - object->purgable == VM_PURGABLE_DENY)) - || size > ANON_MAX_SIZE)) /* LP64todo: remove when dp capable */ + (object != VM_OBJECT_NULL && + (object->vo_size != size || + object->purgable == VM_PURGABLE_DENY)) + || size > ANON_MAX_SIZE)) { /* LP64todo: remove when dp capable */ return KERN_INVALID_ARGUMENT; + } if (!anywhere && overwrite) { /* @@ -1561,32 +2438,60 @@ vm_map_enter( * new mapping fails. 
*/ zap_old_map = vm_map_create(PMAP_NULL, - *address, - *address + size, - map->hdr.entries_pageable); + *address, + *address + size, + map->hdr.entries_pageable); + vm_map_set_page_shift(zap_old_map, VM_MAP_PAGE_SHIFT(map)); + vm_map_disable_hole_optimization(zap_old_map); } -StartAgain: ; +StartAgain:; start = *address; if (anywhere) { vm_map_lock(map); map_locked = TRUE; - - if ((flags & VM_FLAGS_MAP_JIT) && (map->jit_entry_exists)){ - result = KERN_INVALID_ARGUMENT; - goto BailOut; + + if (entry_for_jit) { +#if CONFIG_EMBEDDED + if (map->jit_entry_exists) { + result = KERN_INVALID_ARGUMENT; + goto BailOut; + } + random_address = TRUE; +#endif /* CONFIG_EMBEDDED */ } + if (random_address) { + /* + * Get a random start address. + */ + result = vm_map_random_address_for_size(map, address, size); + if (result != KERN_SUCCESS) { + goto BailOut; + } + start = *address; + } +#if !CONFIG_EMBEDDED + else if ((start == 0 || start == vm_map_min(map)) && + !map->disable_vmentry_reuse && + map->vmmap_high_start != 0) { + start = map->vmmap_high_start; + } +#endif + + /* * Calculate the first possible address. */ - if (start < effective_min_offset) + if (start < effective_min_offset) { start = effective_min_offset; - if (start > effective_max_offset) + } + if (start > effective_max_offset) { RETURN(KERN_NO_SPACE); + } /* * Look for the first possible address; @@ -1594,38 +2499,83 @@ StartAgain: ; * address, we have to start after it. */ - if( map->disable_vmentry_reuse == TRUE) { + if (map->disable_vmentry_reuse == TRUE) { VM_MAP_HIGHEST_ENTRY(map, entry, start); } else { - assert(first_free_is_valid(map)); + if (map->holelistenabled) { + hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list); - entry = map->first_free; + if (hole_entry == NULL) { + /* + * No more space in the map? + */ + result = KERN_NO_SPACE; + goto BailOut; + } else { + boolean_t found_hole = FALSE; - if (entry == vm_map_to_entry(map)) { - entry = NULL; + do { + if (hole_entry->vme_start >= start) { + start = hole_entry->vme_start; + found_hole = TRUE; + break; + } + + if (hole_entry->vme_end > start) { + found_hole = TRUE; + break; + } + hole_entry = hole_entry->vme_next; + } while (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)); + + if (found_hole == FALSE) { + result = KERN_NO_SPACE; + goto BailOut; + } + + entry = hole_entry; + + if (start == 0) { + start += PAGE_SIZE_64; + } + } } else { - if (entry->vme_next == vm_map_to_entry(map)){ - /* - * Hole at the end of the map. - */ + assert(first_free_is_valid(map)); + + entry = map->first_free; + + if (entry == vm_map_to_entry(map)) { entry = NULL; - } else { - if (start < (entry->vme_next)->vme_start ) { - start = entry->vme_end; - } else { + } else { + if (entry->vme_next == vm_map_to_entry(map)) { /* - * Need to do a lookup. + * Hole at the end of the map. */ entry = NULL; + } else { + if (start < (entry->vme_next)->vme_start) { + start = entry->vme_end; + start = vm_map_round_page(start, + VM_MAP_PAGE_MASK(map)); + } else { + /* + * Need to do a lookup. 
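/*
 * Sketch of the randomized-placement helper used just above (hypothetical
 * caller): vm_map_random_address_for_size() only locates a large-enough hole,
 * it does not reserve it, so the entry must still be inserted while the map
 * lock is held.
 */
vm_map_offset_t	rand_addr;

if (vm_map_random_address_for_size(map, &rand_addr, size) == KERN_SUCCESS) {
	/* rand_addr names the start of a hole at least "size" bytes long */
}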
+ */ + entry = NULL; + } } - } - } + } - if (entry == NULL) { - vm_map_entry_t tmp_entry; - if (vm_map_lookup_entry(map, start, &tmp_entry)) - start = tmp_entry->vme_end; - entry = tmp_entry; + if (entry == NULL) { + vm_map_entry_t tmp_entry; + if (vm_map_lookup_entry(map, start, &tmp_entry)) { + assert(!entry_for_jit); + start = tmp_entry->vme_end; + start = vm_map_round_page(start, + VM_MAP_PAGE_MASK(map)); + } + entry = tmp_entry; + } } } @@ -1636,7 +2586,7 @@ StartAgain: ; */ while (TRUE) { - register vm_map_entry_t next; + vm_map_entry_t next; /* * Find the end of the proposed new region. @@ -1645,17 +2595,26 @@ StartAgain: ; */ end = ((start + mask) & ~mask); - if (end < start) + end = vm_map_round_page(end, + VM_MAP_PAGE_MASK(map)); + if (end < start) { RETURN(KERN_NO_SPACE); + } start = end; + assert(VM_MAP_PAGE_ALIGNED(start, + VM_MAP_PAGE_MASK(map))); end += size; - if ((end > effective_max_offset) || (end < start)) { + /* We want an entire page of empty space, but don't increase the allocation size. */ + desired_empty_end = vm_map_round_page(end, VM_MAP_PAGE_MASK(map)); + + if ((desired_empty_end > effective_max_offset) || (desired_empty_end < start)) { if (map->wait_for_space) { + assert(!keep_map_locked); if (size <= (effective_max_offset - - effective_min_offset)) { + effective_min_offset)) { assert_wait((event_t)map, - THREAD_ABORTSAFE); + THREAD_ABORTSAFE); vm_map_unlock(map); map_locked = FALSE; thread_block(THREAD_CONTINUE_NULL); @@ -1665,30 +2624,63 @@ StartAgain: ; RETURN(KERN_NO_SPACE); } - /* - * If there are no more entries, we must win. - */ - next = entry->vme_next; - if (next == vm_map_to_entry(map)) - break; - /* - * If there is another entry, it must be - * after the end of the potential new region. - */ + if (map->holelistenabled) { + if (entry->vme_end >= desired_empty_end) { + break; + } + } else { + /* + * If there are no more entries, we must win. + * + * OR + * + * If there is another entry, it must be + * after the end of the potential new region. + */ - if (next->vme_start >= end) - break; + if (next == vm_map_to_entry(map)) { + break; + } + + if (next->vme_start >= desired_empty_end) { + break; + } + } /* * Didn't fit -- move to the next entry. */ entry = next; - start = entry->vme_end; + + if (map->holelistenabled) { + if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) { + /* + * Wrapped around + */ + result = KERN_NO_SPACE; + goto BailOut; + } + start = entry->vme_start; + } else { + start = entry->vme_end; + } + + start = vm_map_round_page(start, + VM_MAP_PAGE_MASK(map)); + } + + if (map->holelistenabled) { + if (vm_map_lookup_entry(map, entry->vme_start, &entry)) { + panic("Found an existing entry (%p) instead of potential hole at address: 0x%llx.\n", entry, (unsigned long long)entry->vme_start); + } } + *address = start; + assert(VM_MAP_PAGE_ALIGNED(*address, + VM_MAP_PAGE_MASK(map))); } else { /* * Verify that: @@ -1698,8 +2690,9 @@ StartAgain: ; vm_map_lock(map); map_locked = TRUE; - if ((start & mask) != 0) + if ((start & mask) != 0) { RETURN(KERN_NO_SPACE); + } /* * ... the address is within bounds @@ -1714,14 +2707,21 @@ StartAgain: ; } if (overwrite && zap_old_map != VM_MAP_NULL) { + int remove_flags; /* * Fixed mapping and "overwrite" flag: attempt to * remove all existing mappings in the specified * address range, saving them in our "zap_old_map". 
*/ + remove_flags = VM_MAP_REMOVE_SAVE_ENTRIES; + remove_flags |= VM_MAP_REMOVE_NO_MAP_ALIGN; + if (vmk_flags.vmkf_overwrite_immutable) { + /* we can overwrite immutable mappings */ + remove_flags |= VM_MAP_REMOVE_IMMUTABLE; + } (void) vm_map_delete(map, start, end, - VM_MAP_REMOVE_SAVE_ENTRIES, - zap_old_map); + remove_flags, + zap_old_map); } /* @@ -1729,7 +2729,7 @@ StartAgain: ; */ if (vm_map_lookup_entry(map, start, &entry)) { - if (! (flags & VM_FLAGS_ALREADY)) { + if (!(vmk_flags.vmkf_already)) { RETURN(KERN_NO_SPACE); } /* @@ -1740,10 +2740,9 @@ StartAgain: ; if (entry->vme_start < start) { tmp_start -= start - entry->vme_start; tmp_offset -= start - entry->vme_start; - } for (; entry->vme_start < end; - entry = entry->vme_next) { + entry = entry->vme_next) { /* * Check if the mapping's attributes * match the existing map entry. @@ -1751,12 +2750,13 @@ StartAgain: ; if (entry == vm_map_to_entry(map) || entry->vme_start != tmp_start || entry->is_sub_map != is_submap || - entry->offset != tmp_offset || + VME_OFFSET(entry) != tmp_offset || entry->needs_copy != needs_copy || entry->protection != cur_protection || entry->max_protection != max_protection || entry->inheritance != inheritance || - entry->alias != alias) { + entry->iokit_acct != iokit_acct || + VME_ALIAS(entry) != alias) { /* not the same mapping ! */ RETURN(KERN_NO_SPACE); } @@ -1764,21 +2764,21 @@ StartAgain: ; * Check if the same object is being mapped. */ if (is_submap) { - if (entry->object.sub_map != + if (VME_SUBMAP(entry) != (vm_map_t) object) { /* not the same submap */ RETURN(KERN_NO_SPACE); } } else { - if (entry->object.vm_object != object) { + if (VME_OBJECT(entry) != object) { /* not the same VM object... */ vm_object_t obj2; - obj2 = entry->object.vm_object; + obj2 = VME_OBJECT(entry); if ((obj2 == VM_OBJECT_NULL || - obj2->internal) && + obj2->internal) && (object == VM_OBJECT_NULL || - object->internal)) { + object->internal)) { /* * ... but both are * anonymous memory, @@ -1807,8 +2807,9 @@ StartAgain: ; */ if ((entry->vme_next != vm_map_to_entry(map)) && - (entry->vme_next->vme_start < end)) + (entry->vme_next->vme_start < end)) { RETURN(KERN_NO_SPACE); + } } /* @@ -1830,46 +2831,104 @@ StartAgain: ; * semantics. */ - if (purgable) { + if (purgable || + entry_for_jit || + vm_memory_malloc_no_cow(user_alias)) { if (object == VM_OBJECT_NULL) { object = vm_object_allocate(size); object->copy_strategy = MEMORY_OBJECT_COPY_NONE; - object->purgable = VM_PURGABLE_NONVOLATILE; - offset = (vm_object_offset_t)0; - } - } else if ((is_submap == FALSE) && - (object == VM_OBJECT_NULL) && - (entry != vm_map_to_entry(map)) && - (entry->vme_end == start) && - (!entry->is_shared) && - (!entry->is_sub_map) && - ((alias == VM_MEMORY_REALLOC) || (entry->alias == alias)) && - (entry->inheritance == inheritance) && - (entry->protection == cur_protection) && - (entry->max_protection == max_protection) && - (entry->behavior == VM_BEHAVIOR_DEFAULT) && - (entry->in_transition == 0) && - (entry->no_cache == no_cache) && - ((entry->vme_end - entry->vme_start) + size <= - (alias == VM_MEMORY_REALLOC ? 
- ANON_CHUNK_SIZE : - NO_COALESCE_LIMIT)) && - (entry->wired_count == 0)) { /* implies user_wired_count == 0 */ - if (vm_object_coalesce(entry->object.vm_object, - VM_OBJECT_NULL, - entry->offset, - (vm_object_offset_t) 0, - (vm_map_size_t)(entry->vme_end - entry->vme_start), - (vm_map_size_t)(end - entry->vme_end))) { - - /* - * Coalesced the two objects - can extend + object->true_share = FALSE; + if (purgable) { + task_t owner; + object->purgable = VM_PURGABLE_NONVOLATILE; + if (map->pmap == kernel_pmap) { + /* + * Purgeable mappings made in a kernel + * map are "owned" by the kernel itself + * rather than the current user task + * because they're likely to be used by + * more than this user task (see + * execargs_purgeable_allocate(), for + * example). + */ + owner = kernel_task; + } else { + owner = current_task(); + } + assert(object->vo_owner == NULL); + assert(object->resident_page_count == 0); + assert(object->wired_page_count == 0); + vm_object_lock(object); + vm_purgeable_nonvolatile_enqueue(object, owner); + vm_object_unlock(object); + } + offset = (vm_object_offset_t)0; + } + } else if ((is_submap == FALSE) && + (object == VM_OBJECT_NULL) && + (entry != vm_map_to_entry(map)) && + (entry->vme_end == start) && + (!entry->is_shared) && + (!entry->is_sub_map) && + (!entry->in_transition) && + (!entry->needs_wakeup) && + (entry->behavior == VM_BEHAVIOR_DEFAULT) && + (entry->protection == cur_protection) && + (entry->max_protection == max_protection) && + (entry->inheritance == inheritance) && + ((user_alias == VM_MEMORY_REALLOC) || + (VME_ALIAS(entry) == alias)) && + (entry->no_cache == no_cache) && + (entry->permanent == permanent) && + /* no coalescing for immutable executable mappings */ + !((entry->protection & VM_PROT_EXECUTE) && + entry->permanent) && + (!entry->superpage_size && !superpage_size) && + /* + * No coalescing if not map-aligned, to avoid propagating + * that condition any further than needed: + */ + (!entry->map_aligned || !clear_map_aligned) && + (!entry->zero_wired_pages) && + (!entry->used_for_jit && !entry_for_jit) && + (!entry->pmap_cs_associated) && + (entry->iokit_acct == iokit_acct) && + (!entry->vme_resilient_codesign) && + (!entry->vme_resilient_media) && + (!entry->vme_atomic) && + (entry->vme_no_copy_on_read == no_copy_on_read) && + + ((entry->vme_end - entry->vme_start) + size <= + (user_alias == VM_MEMORY_REALLOC ? + ANON_CHUNK_SIZE : + NO_COALESCE_LIMIT)) && + + (entry->wired_count == 0)) { /* implies user_wired_count == 0 */ + if (vm_object_coalesce(VME_OBJECT(entry), + VM_OBJECT_NULL, + VME_OFFSET(entry), + (vm_object_offset_t) 0, + (vm_map_size_t)(entry->vme_end - entry->vme_start), + (vm_map_size_t)(end - entry->vme_end))) { + /* + * Coalesced the two objects - can extend * the previous map entry to include the * new range. */ map->size += (end - entry->vme_end); + assert(entry->vme_start < end); + assert(VM_MAP_PAGE_ALIGNED(end, + VM_MAP_PAGE_MASK(map))); + if (__improbable(vm_debug_events)) { + DTRACE_VM5(map_entry_extend, vm_map_t, map, vm_map_entry_t, entry, vm_address_t, entry->vme_start, vm_address_t, entry->vme_end, vm_address_t, end); + } entry->vme_end = end; - vm_map_store_update_first_free(map, map->first_free); + if (map->holelistenabled) { + vm_map_store_update_first_free(map, entry, TRUE); + } else { + vm_map_store_update_first_free(map, map->first_free, TRUE); + } + new_mapping_established = TRUE; RETURN(KERN_SUCCESS); } } @@ -1877,164 +2936,207 @@ StartAgain: ; step = superpage_size ? 
SUPERPAGE_SIZE : (end - start); new_entry = NULL; - for (tmp2_start = start; tmp2_start (vm_map_size_t)ANON_CHUNK_SIZE && + size > chunk_size && max_protection != VM_PROT_NONE && - superpage_size == 0) - tmp_end = tmp_start + (vm_map_size_t)ANON_CHUNK_SIZE; - else + superpage_size == 0) { + tmp_end = tmp_start + chunk_size; + } else { tmp_end = tmp2_end; + } do { - new_entry = vm_map_entry_insert(map, entry, tmp_start, tmp_end, - object, offset, needs_copy, - FALSE, FALSE, - cur_protection, max_protection, - VM_BEHAVIOR_DEFAULT, - (flags & VM_FLAGS_MAP_JIT)? VM_INHERIT_NONE: inheritance, - 0, no_cache, - permanent, superpage_size); - new_entry->alias = alias; - if (flags & VM_FLAGS_MAP_JIT){ - if (!(map->jit_entry_exists)){ - new_entry->used_for_jit = TRUE; - map->jit_entry_exists = TRUE; - } + new_entry = vm_map_entry_insert( + map, entry, tmp_start, tmp_end, + object, offset, needs_copy, + FALSE, FALSE, + cur_protection, max_protection, + VM_BEHAVIOR_DEFAULT, + (entry_for_jit)? VM_INHERIT_NONE: inheritance, + 0, + no_cache, + permanent, + no_copy_on_read, + superpage_size, + clear_map_aligned, + is_submap, + entry_for_jit, + alias); + + assert((object != kernel_object) || (VM_KERN_MEMORY_NONE != alias)); + + if (resilient_codesign && + !((cur_protection | max_protection) & + (VM_PROT_WRITE | VM_PROT_EXECUTE))) { + new_entry->vme_resilient_codesign = TRUE; + } + + if (resilient_media && + (object == VM_OBJECT_NULL || + object->internal)) { + new_entry->vme_resilient_media = TRUE; + } + + assert(!new_entry->iokit_acct); + if (!is_submap && + object != VM_OBJECT_NULL && + (object->purgable != VM_PURGABLE_DENY || + object->vo_ledger_tag)) { + assert(new_entry->use_pmap); + assert(!new_entry->iokit_acct); + /* + * Turn off pmap accounting since + * purgeable (or tagged) objects have their + * own ledgers. + */ + new_entry->use_pmap = FALSE; + } else if (!is_submap && + iokit_acct && + object != VM_OBJECT_NULL && + object->internal) { + /* alternate accounting */ + assert(!new_entry->iokit_acct); + assert(new_entry->use_pmap); + new_entry->iokit_acct = TRUE; + new_entry->use_pmap = FALSE; + DTRACE_VM4( + vm_map_iokit_mapped_region, + vm_map_t, map, + vm_map_offset_t, new_entry->vme_start, + vm_map_offset_t, new_entry->vme_end, + int, VME_ALIAS(new_entry)); + vm_map_iokit_mapped_region( + map, + (new_entry->vme_end - + new_entry->vme_start)); + } else if (!is_submap) { + assert(!new_entry->iokit_acct); + assert(new_entry->use_pmap); } if (is_submap) { - vm_map_t submap; - boolean_t submap_is_64bit; - boolean_t use_pmap; + vm_map_t submap; + boolean_t submap_is_64bit; + boolean_t use_pmap; - new_entry->is_sub_map = TRUE; + assert(new_entry->is_sub_map); + assert(!new_entry->use_pmap); + assert(!new_entry->iokit_acct); submap = (vm_map_t) object; submap_is_64bit = vm_map_is_64bit(submap); - use_pmap = (alias == VM_MEMORY_SHARED_PMAP); - #ifndef NO_NESTED_PMAP + use_pmap = vmk_flags.vmkf_nested_pmap; +#ifndef NO_NESTED_PMAP if (use_pmap && submap->pmap == NULL) { + ledger_t ledger = map->pmap->ledger; /* we need a sub pmap to nest... */ - submap->pmap = pmap_create(0, submap_is_64bit); + submap->pmap = pmap_create_options(ledger, 0, + submap_is_64bit ? PMAP_CREATE_64BIT : 0); if (submap->pmap == NULL) { /* let's proceed without nesting... 
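/*
 * Hypothetical caller sketch for the nesting path above: installing a
 * prebuilt submap (e.g. a shared region) with vmkf_nested_pmap so that its
 * page tables are shared by every map that nests it.  "parent_map",
 * "shared_submap", "nest_addr" and "nest_size" are illustrative names.
 */
vm_map_kernel_flags_t	nest_flags = VM_MAP_KERNEL_FLAGS_NONE;
kern_return_t		nest_kr;

nest_flags.vmkf_submap = TRUE;
nest_flags.vmkf_nested_pmap = TRUE;	/* ask for pmap_nest() of the submap's pmap */

nest_kr = vm_map_enter(parent_map,
	&nest_addr,			/* fixed address chosen by the caller */
	nest_size,
	(vm_map_offset_t)0,
	VM_FLAGS_FIXED,
	nest_flags,
	VM_MEMORY_SHARED_PMAP,
	(vm_object_t) shared_submap,	/* the submap is passed in the object slot */
	0,
	FALSE,
	VM_PROT_READ | VM_PROT_EXECUTE,
	VM_PROT_READ | VM_PROT_EXECUTE,
	VM_INHERIT_SHARE);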
*/ } +#if defined(__arm__) || defined(__arm64__) + else { + pmap_set_nested(submap->pmap); + } +#endif } if (use_pmap && submap->pmap != NULL) { kr = pmap_nest(map->pmap, - submap->pmap, - tmp_start, - tmp_start, - tmp_end - tmp_start); + submap->pmap, + tmp_start, + tmp_start, + tmp_end - tmp_start); if (kr != KERN_SUCCESS) { printf("vm_map_enter: " - "pmap_nest(0x%llx,0x%llx) " - "error 0x%x\n", - (long long)tmp_start, - (long long)tmp_end, - kr); + "pmap_nest(0x%llx,0x%llx) " + "error 0x%x\n", + (long long)tmp_start, + (long long)tmp_end, + kr); } else { /* we're now nested ! */ new_entry->use_pmap = TRUE; pmap_empty = FALSE; } } - #endif /* NO_NESTED_PMAP */ +#endif /* NO_NESTED_PMAP */ } entry = new_entry; if (superpage_size) { vm_page_t pages, m; vm_object_t sp_object; + vm_object_offset_t sp_offset; - entry->offset = 0; + VME_OFFSET_SET(entry, 0); /* allocate one superpage */ - kr = cpm_allocate(SUPERPAGE_SIZE, &pages, 0, SUPERPAGE_NBASEPAGES-1, TRUE, 0); + kr = cpm_allocate(SUPERPAGE_SIZE, &pages, 0, SUPERPAGE_NBASEPAGES - 1, TRUE, 0); if (kr != KERN_SUCCESS) { - new_mapping_established = TRUE; /* will cause deallocation of whole range */ + /* deallocate whole range... */ + new_mapping_established = TRUE; + /* ... but only up to "tmp_end" */ + size -= end - tmp_end; RETURN(kr); } /* create one vm_object per superpage */ sp_object = vm_object_allocate((vm_map_size_t)(entry->vme_end - entry->vme_start)); sp_object->phys_contiguous = TRUE; - sp_object->vo_shadow_offset = (vm_object_offset_t)pages->phys_page*PAGE_SIZE; - entry->object.vm_object = sp_object; + sp_object->vo_shadow_offset = (vm_object_offset_t)VM_PAGE_GET_PHYS_PAGE(pages) * PAGE_SIZE; + VME_OBJECT_SET(entry, sp_object); + assert(entry->use_pmap); /* enter the base pages into the object */ vm_object_lock(sp_object); - for (offset = 0; offset < SUPERPAGE_SIZE; offset += PAGE_SIZE) { + for (sp_offset = 0; + sp_offset < SUPERPAGE_SIZE; + sp_offset += PAGE_SIZE) { m = pages; - pmap_zero_page(m->phys_page); + pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m)); pages = NEXT_PAGE(m); *(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL; - vm_page_insert(m, sp_object, offset); + vm_page_insert_wired(m, sp_object, sp_offset, VM_KERN_MEMORY_OSFMK); } vm_object_unlock(sp_object); } - } while (tmp_end != tmp2_end && - (tmp_start = tmp_end) && - (tmp_end = (tmp2_end - tmp_end > (vm_map_size_t)ANON_CHUNK_SIZE) ? - tmp_end + (vm_map_size_t)ANON_CHUNK_SIZE : tmp2_end)); + } while (tmp_end != tmp2_end && + (tmp_start = tmp_end) && + (tmp_end = (tmp2_end - tmp_end > chunk_size) ? + tmp_end + chunk_size : tmp2_end)); } - vm_map_unlock(map); - map_locked = FALSE; - new_mapping_established = TRUE; - /* Wire down the new entry if the user - * requested all new map entries be wired. 
- */ - if ((map->wiring_required)||(superpage_size)) { - pmap_empty = FALSE; /* pmap won't be empty */ - result = vm_map_wire(map, start, end, - new_entry->protection, TRUE); - RETURN(result); - } - - if ((object != VM_OBJECT_NULL) && - (vm_map_pmap_enter_enable) && - (!anywhere) && - (!needs_copy) && - (size < (128*1024))) { - pmap_empty = FALSE; /* pmap won't be empty */ - - if (override_nx(map, alias) && cur_protection) - cur_protection |= VM_PROT_EXECUTE; +BailOut: + assert(map_locked == TRUE); - vm_map_pmap_enter(map, start, end, - object, offset, cur_protection); - } - -BailOut: ; if (result == KERN_SUCCESS) { vm_prot_t pager_prot; memory_object_t pager; +#if DEBUG if (pmap_empty && - !(flags & VM_FLAGS_NO_PMAP_CHECK)) { + !(vmk_flags.vmkf_no_pmap_check)) { assert(vm_map_pmap_is_empty(map, - *address, - *address+size)); + *address, + *address + size)); } +#endif /* DEBUG */ /* * For "named" VM objects, let the pager know that the @@ -2074,7 +3176,35 @@ BailOut: ; } vm_object_unlock(object); } - } else { + } + + assert(map_locked == TRUE); + + if (!keep_map_locked) { + vm_map_unlock(map); + map_locked = FALSE; + } + + /* + * We can't hold the map lock if we enter this block. + */ + + if (result == KERN_SUCCESS) { + /* Wire down the new entry if the user + * requested all new map entries be wired. + */ + if ((map->wiring_required) || (superpage_size)) { + assert(!keep_map_locked); + pmap_empty = FALSE; /* pmap won't be empty */ + kr = vm_map_wire_kernel(map, start, end, + new_entry->protection, VM_KERN_MEMORY_MLOCK, + TRUE); + result = kr; + } + + } + + if (result != KERN_SUCCESS) { if (new_mapping_established) { /* * We have to get rid of the new mappings since we @@ -2083,20 +3213,25 @@ BailOut: ; * that someone else create new mappings that range. */ zap_new_map = vm_map_create(PMAP_NULL, - *address, - *address + size, - map->hdr.entries_pageable); + *address, + *address + size, + map->hdr.entries_pageable); + vm_map_set_page_shift(zap_new_map, + VM_MAP_PAGE_SHIFT(map)); + vm_map_disable_hole_optimization(zap_new_map); + if (!map_locked) { vm_map_lock(map); map_locked = TRUE; } - (void) vm_map_delete(map, *address, *address+size, - VM_MAP_REMOVE_SAVE_ENTRIES, - zap_new_map); + (void) vm_map_delete(map, *address, *address + size, + (VM_MAP_REMOVE_SAVE_ENTRIES | + VM_MAP_REMOVE_NO_MAP_ALIGN), + zap_new_map); } if (zap_old_map != VM_MAP_NULL && zap_old_map->hdr.nentries != 0) { - vm_map_entry_t entry1, entry2; + vm_map_entry_t entry1, entry2; /* * The new mapping failed. Attempt to restore @@ -2126,16 +3261,17 @@ BailOut: ; * inserting them all after "entry1". */ for (entry2 = vm_map_first_entry(zap_old_map); - entry2 != vm_map_to_entry(zap_old_map); - entry2 = vm_map_first_entry(zap_old_map)) { + entry2 != vm_map_to_entry(zap_old_map); + entry2 = vm_map_first_entry(zap_old_map)) { vm_map_size_t entry_size; entry_size = (entry2->vme_end - - entry2->vme_start); + entry2->vme_start); vm_map_store_entry_unlink(zap_old_map, - entry2); + entry2); zap_old_map->size -= entry_size; - vm_map_store_entry_link(map, entry1, entry2); + vm_map_store_entry_link(map, entry1, entry2, + VM_MAP_KERNEL_FLAGS_NONE); map->size += entry_size; entry1 = entry2; } @@ -2150,7 +3286,11 @@ BailOut: ; } } - if (map_locked) { + /* + * The caller is responsible for releasing the lock if it requested to + * keep the map locked. 
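/*
 * Hypothetical caller sketch for vmkf_keep_map_locked, as described in the
 * comment above: on a successful return the map is still locked and the
 * caller drops the lock itself once its post-insertion work is done.  Error
 * handling is omitted; "map", "addr" and "size" are illustrative.
 */
vm_map_kernel_flags_t	keep_flags = VM_MAP_KERNEL_FLAGS_NONE;

keep_flags.vmkf_keep_map_locked = TRUE;
kr = vm_map_enter(map, &addr, size, (vm_map_offset_t)0,
	VM_FLAGS_ANYWHERE, keep_flags, VM_KERN_MEMORY_NONE,
	VM_OBJECT_NULL, 0, FALSE,
	VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
if (kr == KERN_SUCCESS) {
	/* still holding the map lock: finish the work that must be atomic
	 * with the insertion, then release it */
	vm_map_unlock(map);
}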
+ */ + if (map_locked && !keep_map_locked) { vm_map_unlock(map); } @@ -2169,371 +3309,684 @@ BailOut: ; return result; -#undef RETURN +#undef RETURN } +#if __arm64__ +extern const struct memory_object_pager_ops fourk_pager_ops; kern_return_t -vm_map_enter_mem_object( - vm_map_t target_map, - vm_map_offset_t *address, - vm_map_size_t initial_size, - vm_map_offset_t mask, - int flags, - ipc_port_t port, - vm_object_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) -{ - vm_map_address_t map_addr; - vm_map_size_t map_size; - vm_object_t object; - vm_object_size_t size; - kern_return_t result; - boolean_t mask_cur_protection, mask_max_protection; +vm_map_enter_fourk( + vm_map_t map, + vm_map_offset_t *address, /* IN/OUT */ + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t alias, + vm_object_t object, + vm_object_offset_t offset, + boolean_t needs_copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) +{ + vm_map_entry_t entry, new_entry; + vm_map_offset_t start, fourk_start; + vm_map_offset_t end, fourk_end; + vm_map_size_t fourk_size; + kern_return_t result = KERN_SUCCESS; + vm_map_t zap_old_map = VM_MAP_NULL; + vm_map_t zap_new_map = VM_MAP_NULL; + boolean_t map_locked = FALSE; + boolean_t pmap_empty = TRUE; + boolean_t new_mapping_established = FALSE; + boolean_t keep_map_locked = vmk_flags.vmkf_keep_map_locked; + boolean_t anywhere = ((flags & VM_FLAGS_ANYWHERE) != 0); + boolean_t purgable = ((flags & VM_FLAGS_PURGABLE) != 0); + boolean_t overwrite = ((flags & VM_FLAGS_OVERWRITE) != 0); + boolean_t no_cache = ((flags & VM_FLAGS_NO_CACHE) != 0); + boolean_t is_submap = vmk_flags.vmkf_submap; + boolean_t permanent = vmk_flags.vmkf_permanent; + boolean_t no_copy_on_read = vmk_flags.vmkf_permanent; + boolean_t entry_for_jit = vmk_flags.vmkf_map_jit; +// boolean_t iokit_acct = vmk_flags.vmkf_iokit_acct; + unsigned int superpage_size = ((flags & VM_FLAGS_SUPERPAGE_MASK) >> VM_FLAGS_SUPERPAGE_SHIFT); + vm_map_offset_t effective_min_offset, effective_max_offset; + kern_return_t kr; + boolean_t clear_map_aligned = FALSE; + memory_object_t fourk_mem_obj; + vm_object_t fourk_object; + vm_map_offset_t fourk_pager_offset; + int fourk_pager_index_start, fourk_pager_index_num; + int cur_idx; + boolean_t fourk_copy; + vm_object_t copy_object; + vm_object_offset_t copy_offset; + + fourk_mem_obj = MEMORY_OBJECT_NULL; + fourk_object = VM_OBJECT_NULL; - mask_cur_protection = cur_protection & VM_PROT_IS_MASK; - mask_max_protection = max_protection & VM_PROT_IS_MASK; - cur_protection &= ~VM_PROT_IS_MASK; - max_protection &= ~VM_PROT_IS_MASK; + if (superpage_size) { + return KERN_NOT_SUPPORTED; + } - /* - * Check arguments for validity - */ - if ((target_map == VM_MAP_NULL) || - (cur_protection & ~VM_PROT_ALL) || - (max_protection & ~VM_PROT_ALL) || - (inheritance > VM_INHERIT_LAST_VALID) || - initial_size == 0) - return KERN_INVALID_ARGUMENT; - - map_addr = vm_map_trunc_page(*address); - map_size = vm_map_round_page(initial_size); - size = vm_object_round_page(initial_size); + if ((cur_protection & VM_PROT_WRITE) && + (cur_protection & VM_PROT_EXECUTE) && +#if !CONFIG_EMBEDDED + map != kernel_map && + cs_process_enforcement(NULL) && +#endif /* !CONFIG_EMBEDDED */ + !entry_for_jit) { + DTRACE_VM3(cs_wx, + uint64_t, 0, + uint64_t, 0, + vm_prot_t, cur_protection); + printf("CODE SIGNING: %d[%s] %s: curprot cannot be write+execute. 
" + "turning off execute\n", + proc_selfpid(), + (current_task()->bsd_info + ? proc_name_address(current_task()->bsd_info) + : "?"), + __FUNCTION__); + cur_protection &= ~VM_PROT_EXECUTE; + } /* - * Find the vm object (if any) corresponding to this port. + * If the task has requested executable lockdown, + * deny any new executable mapping. */ - if (!IP_VALID(port)) { - object = VM_OBJECT_NULL; - offset = 0; - copy = FALSE; - } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) { - vm_named_entry_t named_entry; - - named_entry = (vm_named_entry_t) port->ip_kobject; - /* a few checks to make sure user is obeying rules */ - if (size == 0) { - if (offset >= named_entry->size) - return KERN_INVALID_RIGHT; - size = named_entry->size - offset; - } - if (mask_max_protection) { - max_protection &= named_entry->protection; - } - if (mask_cur_protection) { - cur_protection &= named_entry->protection; + if (map->map_disallow_new_exec == TRUE) { + if (cur_protection & VM_PROT_EXECUTE) { + return KERN_PROTECTION_FAILURE; } - if ((named_entry->protection & max_protection) != - max_protection) - return KERN_INVALID_RIGHT; - if ((named_entry->protection & cur_protection) != - cur_protection) - return KERN_INVALID_RIGHT; - if (named_entry->size < (offset + size)) - return KERN_INVALID_ARGUMENT; + } - /* the callers parameter offset is defined to be the */ - /* offset from beginning of named entry offset in object */ - offset = offset + named_entry->offset; - - named_entry_lock(named_entry); - if (named_entry->is_sub_map) { - vm_map_t submap; + if (is_submap) { + return KERN_NOT_SUPPORTED; + } + if (vmk_flags.vmkf_already) { + return KERN_NOT_SUPPORTED; + } + if (purgable || entry_for_jit) { + return KERN_NOT_SUPPORTED; + } - submap = named_entry->backing.map; - vm_map_lock(submap); - vm_map_reference(submap); - vm_map_unlock(submap); - named_entry_unlock(named_entry); + effective_min_offset = map->min_offset; - result = vm_map_enter(target_map, - &map_addr, - map_size, - mask, - flags | VM_FLAGS_SUBMAP, - (vm_object_t) submap, - offset, - copy, - cur_protection, - max_protection, - inheritance); - if (result != KERN_SUCCESS) { - vm_map_deallocate(submap); - } else { - /* - * No need to lock "submap" just to check its - * "mapped" flag: that flag is never reset - * once it's been set and if we race, we'll - * just end up setting it twice, which is OK. - */ - if (submap->mapped == FALSE) { - /* - * This submap has never been mapped. - * Set its "mapped" flag now that it - * has been mapped. - * This happens only for the first ever - * mapping of a "submap". 
- */ - vm_map_lock(submap); - submap->mapped = TRUE; - vm_map_unlock(submap); - } - *address = map_addr; - } - return result; + if (vmk_flags.vmkf_beyond_max) { + return KERN_NOT_SUPPORTED; + } else { + effective_max_offset = map->max_offset; + } - } else if (named_entry->is_pager) { - unsigned int access; - vm_prot_t protections; - unsigned int wimg_mode; + if (size == 0 || + (offset & FOURK_PAGE_MASK) != 0) { + *address = 0; + return KERN_INVALID_ARGUMENT; + } - protections = named_entry->protection & VM_PROT_ALL; - access = GET_MAP_MEM(named_entry->protection); +#define RETURN(value) { result = value; goto BailOut; } - object = vm_object_enter(named_entry->backing.pager, - named_entry->size, - named_entry->internal, - FALSE, - FALSE); - if (object == VM_OBJECT_NULL) { - named_entry_unlock(named_entry); - return KERN_INVALID_OBJECT; - } + assert(VM_MAP_PAGE_ALIGNED(*address, FOURK_PAGE_MASK)); + assert(VM_MAP_PAGE_ALIGNED(size, FOURK_PAGE_MASK)); - /* JMM - drop reference on pager here */ + if (!anywhere && overwrite) { + return KERN_NOT_SUPPORTED; + } + if (!anywhere && overwrite) { + /* + * Create a temporary VM map to hold the old mappings in the + * affected area while we create the new one. + * This avoids releasing the VM map lock in + * vm_map_entry_delete() and allows atomicity + * when we want to replace some mappings with a new one. + * It also allows us to restore the old VM mappings if the + * new mapping fails. + */ + zap_old_map = vm_map_create(PMAP_NULL, + *address, + *address + size, + map->hdr.entries_pageable); + vm_map_set_page_shift(zap_old_map, VM_MAP_PAGE_SHIFT(map)); + vm_map_disable_hole_optimization(zap_old_map); + } - /* create an extra ref for the named entry */ - vm_object_lock(object); - vm_object_reference_locked(object); - named_entry->backing.object = object; - named_entry->is_pager = FALSE; - named_entry_unlock(named_entry); + fourk_start = *address; + fourk_size = size; + fourk_end = fourk_start + fourk_size; - wimg_mode = object->wimg_bits; + start = vm_map_trunc_page(*address, VM_MAP_PAGE_MASK(map)); + end = vm_map_round_page(fourk_end, VM_MAP_PAGE_MASK(map)); + size = end - start; - if (access == MAP_MEM_IO) { - wimg_mode = VM_WIMG_IO; - } else if (access == MAP_MEM_COPYBACK) { - wimg_mode = VM_WIMG_USE_DEFAULT; - } else if (access == MAP_MEM_WTHRU) { - wimg_mode = VM_WIMG_WTHRU; - } else if (access == MAP_MEM_WCOMB) { - wimg_mode = VM_WIMG_WCOMB; - } + if (anywhere) { + return KERN_NOT_SUPPORTED; + } else { + /* + * Verify that: + * the address doesn't itself violate + * the mask requirement. + */ - /* wait for object (if any) to be ready */ - if (!named_entry->internal) { - while (!object->pager_ready) { - vm_object_wait( - object, - VM_OBJECT_EVENT_PAGER_READY, - THREAD_UNINT); - vm_object_lock(object); - } - } + vm_map_lock(map); + map_locked = TRUE; + if ((start & mask) != 0) { + RETURN(KERN_NO_SPACE); + } - if (object->wimg_bits != wimg_mode) - vm_object_change_wimg_mode(object, wimg_mode); + /* + * ... the address is within bounds + */ - object->true_share = TRUE; + end = start + size; - if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) - object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; - vm_object_unlock(object); - } else { - /* This is the case where we are going to map */ - /* an already mapped object. If the object is */ - /* not ready it is internal. An external */ - /* object cannot be mapped until it is ready */ - /* we can therefore avoid the ready check */ - /* in this case. 
*/ - object = named_entry->backing.object; - assert(object != VM_OBJECT_NULL); - named_entry_unlock(named_entry); - vm_object_reference(object); + if ((start < effective_min_offset) || + (end > effective_max_offset) || + (start >= end)) { + RETURN(KERN_INVALID_ADDRESS); } - } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) { + + if (overwrite && zap_old_map != VM_MAP_NULL) { + /* + * Fixed mapping and "overwrite" flag: attempt to + * remove all existing mappings in the specified + * address range, saving them in our "zap_old_map". + */ + (void) vm_map_delete(map, start, end, + (VM_MAP_REMOVE_SAVE_ENTRIES | + VM_MAP_REMOVE_NO_MAP_ALIGN), + zap_old_map); + } + /* - * JMM - This is temporary until we unify named entries - * and raw memory objects. - * - * Detected fake ip_kotype for a memory object. In - * this case, the port isn't really a port at all, but - * instead is just a raw memory object. + * ... the starting address isn't allocated */ - - object = vm_object_enter((memory_object_t)port, - size, FALSE, FALSE, FALSE); - if (object == VM_OBJECT_NULL) - return KERN_INVALID_OBJECT; + if (vm_map_lookup_entry(map, start, &entry)) { + vm_object_t cur_object, shadow_object; - /* wait for object (if any) to be ready */ - if (object != VM_OBJECT_NULL) { - if (object == kernel_object) { - printf("Warning: Attempt to map kernel object" - " by a non-private kernel entity\n"); - return KERN_INVALID_OBJECT; + /* + * We might already some 4K mappings + * in a 16K page here. + */ + + if (entry->vme_end - entry->vme_start + != SIXTEENK_PAGE_SIZE) { + RETURN(KERN_NO_SPACE); + } + if (entry->is_sub_map) { + RETURN(KERN_NO_SPACE); + } + if (VME_OBJECT(entry) == VM_OBJECT_NULL) { + RETURN(KERN_NO_SPACE); } - if (!object->pager_ready) { - vm_object_lock(object); - while (!object->pager_ready) { - vm_object_wait(object, - VM_OBJECT_EVENT_PAGER_READY, - THREAD_UNINT); - vm_object_lock(object); - } - vm_object_unlock(object); + /* go all the way down the shadow chain */ + cur_object = VME_OBJECT(entry); + vm_object_lock(cur_object); + while (cur_object->shadow != VM_OBJECT_NULL) { + shadow_object = cur_object->shadow; + vm_object_lock(shadow_object); + vm_object_unlock(cur_object); + cur_object = shadow_object; + shadow_object = VM_OBJECT_NULL; + } + if (cur_object->internal || + cur_object->pager == NULL) { + vm_object_unlock(cur_object); + RETURN(KERN_NO_SPACE); + } + if (cur_object->pager->mo_pager_ops + != &fourk_pager_ops) { + vm_object_unlock(cur_object); + RETURN(KERN_NO_SPACE); + } + fourk_object = cur_object; + fourk_mem_obj = fourk_object->pager; + + /* keep the "4K" object alive */ + vm_object_reference_locked(fourk_object); + vm_object_unlock(fourk_object); + + /* merge permissions */ + entry->protection |= cur_protection; + entry->max_protection |= max_protection; + if ((entry->protection & (VM_PROT_WRITE | + VM_PROT_EXECUTE)) == + (VM_PROT_WRITE | VM_PROT_EXECUTE) && + fourk_binary_compatibility_unsafe && + fourk_binary_compatibility_allow_wx) { + /* write+execute: need to be "jit" */ + entry->used_for_jit = TRUE; } - } - } else { - return KERN_INVALID_OBJECT; - } - if (object != VM_OBJECT_NULL && - object->named && - object->pager != MEMORY_OBJECT_NULL && - object->copy_strategy != MEMORY_OBJECT_COPY_NONE) { - memory_object_t pager; - vm_prot_t pager_prot; - kern_return_t kr; + goto map_in_fourk_pager; + } /* - * For "named" VM objects, let the pager know that the - * memory object is being mapped. 
Some pagers need to keep - * track of this, to know when they can reclaim the memory - * object, for example. - * VM calls memory_object_map() for each mapping (specifying + * ... the next region doesn't overlap the + * end point. + */ + + if ((entry->vme_next != vm_map_to_entry(map)) && + (entry->vme_next->vme_start < end)) { + RETURN(KERN_NO_SPACE); + } + } + + /* + * At this point, + * "start" and "end" should define the endpoints of the + * available new range, and + * "entry" should refer to the region before the new + * range, and + * + * the map should be locked. + */ + + /* create a new "4K" pager */ + fourk_mem_obj = fourk_pager_create(); + fourk_object = fourk_pager_to_vm_object(fourk_mem_obj); + assert(fourk_object); + + /* keep the "4" object alive */ + vm_object_reference(fourk_object); + + /* create a "copy" object, to map the "4K" object copy-on-write */ + fourk_copy = TRUE; + result = vm_object_copy_strategically(fourk_object, + 0, + end - start, + ©_object, + ©_offset, + &fourk_copy); + assert(result == KERN_SUCCESS); + assert(copy_object != VM_OBJECT_NULL); + assert(copy_offset == 0); + + /* take a reference on the copy object, for this mapping */ + vm_object_reference(copy_object); + + /* map the "4K" pager's copy object */ + new_entry = + vm_map_entry_insert(map, entry, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(end, + VM_MAP_PAGE_MASK(map)), + copy_object, + 0, /* offset */ + FALSE, /* needs_copy */ + FALSE, + FALSE, + cur_protection, max_protection, + VM_BEHAVIOR_DEFAULT, + ((entry_for_jit) + ? VM_INHERIT_NONE + : inheritance), + 0, + no_cache, + permanent, + no_copy_on_read, + superpage_size, + clear_map_aligned, + is_submap, + FALSE, /* jit */ + alias); + entry = new_entry; + +#if VM_MAP_DEBUG_FOURK + if (vm_map_debug_fourk) { + printf("FOURK_PAGER: map %p [0x%llx:0x%llx] new pager %p\n", + map, + (uint64_t) entry->vme_start, + (uint64_t) entry->vme_end, + fourk_mem_obj); + } +#endif /* VM_MAP_DEBUG_FOURK */ + + new_mapping_established = TRUE; + +map_in_fourk_pager: + /* "map" the original "object" where it belongs in the "4K" pager */ + fourk_pager_offset = (fourk_start & SIXTEENK_PAGE_MASK); + fourk_pager_index_start = (int) (fourk_pager_offset / FOURK_PAGE_SIZE); + if (fourk_size > SIXTEENK_PAGE_SIZE) { + fourk_pager_index_num = 4; + } else { + fourk_pager_index_num = (int) (fourk_size / FOURK_PAGE_SIZE); + } + if (fourk_pager_index_start + fourk_pager_index_num > 4) { + fourk_pager_index_num = 4 - fourk_pager_index_start; + } + for (cur_idx = 0; + cur_idx < fourk_pager_index_num; + cur_idx++) { + vm_object_t old_object; + vm_object_offset_t old_offset; + + kr = fourk_pager_populate(fourk_mem_obj, + TRUE, /* overwrite */ + fourk_pager_index_start + cur_idx, + object, + (object + ? (offset + + (cur_idx * FOURK_PAGE_SIZE)) + : 0), + &old_object, + &old_offset); +#if VM_MAP_DEBUG_FOURK + if (vm_map_debug_fourk) { + if (old_object == (vm_object_t) -1 && + old_offset == (vm_object_offset_t) -1) { + printf("FOURK_PAGER: map %p [0x%llx:0x%llx] " + "pager [%p:0x%llx] " + "populate[%d] " + "[object:%p,offset:0x%llx]\n", + map, + (uint64_t) entry->vme_start, + (uint64_t) entry->vme_end, + fourk_mem_obj, + VME_OFFSET(entry), + fourk_pager_index_start + cur_idx, + object, + (object + ? 
(offset + (cur_idx * FOURK_PAGE_SIZE)) + : 0)); + } else { + printf("FOURK_PAGER: map %p [0x%llx:0x%llx] " + "pager [%p:0x%llx] " + "populate[%d] [object:%p,offset:0x%llx] " + "old [%p:0x%llx]\n", + map, + (uint64_t) entry->vme_start, + (uint64_t) entry->vme_end, + fourk_mem_obj, + VME_OFFSET(entry), + fourk_pager_index_start + cur_idx, + object, + (object + ? (offset + (cur_idx * FOURK_PAGE_SIZE)) + : 0), + old_object, + old_offset); + } + } +#endif /* VM_MAP_DEBUG_FOURK */ + + assert(kr == KERN_SUCCESS); + if (object != old_object && + object != VM_OBJECT_NULL && + object != (vm_object_t) -1) { + vm_object_reference(object); + } + if (object != old_object && + old_object != VM_OBJECT_NULL && + old_object != (vm_object_t) -1) { + vm_object_deallocate(old_object); + } + } + +BailOut: + assert(map_locked == TRUE); + + if (fourk_object != VM_OBJECT_NULL) { + vm_object_deallocate(fourk_object); + fourk_object = VM_OBJECT_NULL; + fourk_mem_obj = MEMORY_OBJECT_NULL; + } + + if (result == KERN_SUCCESS) { + vm_prot_t pager_prot; + memory_object_t pager; + +#if DEBUG + if (pmap_empty && + !(vmk_flags.vmkf_no_pmap_check)) { + assert(vm_map_pmap_is_empty(map, + *address, + *address + size)); + } +#endif /* DEBUG */ + + /* + * For "named" VM objects, let the pager know that the + * memory object is being mapped. Some pagers need to keep + * track of this, to know when they can reclaim the memory + * object, for example. + * VM calls memory_object_map() for each mapping (specifying * the protection of each mapping) and calls * memory_object_last_unmap() when all the mappings are gone. */ pager_prot = max_protection; - if (copy) { + if (needs_copy) { /* - * Copy-On-Write mapping: won't modify the - * memory object. + * Copy-On-Write mapping: won't modify + * the memory object. 
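/*
 * Illustrative sketch (not from the patch itself): the fourk_pager_populate()
 * loop above only fills the 4K slots that the mapping actually covers inside
 * one 16K page.  The index math, written out standalone and assuming the
 * usual FOURK_PAGE_SIZE = 0x1000 / SIXTEENK_PAGE_SIZE = 0x4000 values:
 */
#include <stdint.h>

#define FOURK_SIZE	0x1000ULL
#define SIXTEENK_SIZE	0x4000ULL
#define SIXTEENK_MASK	(SIXTEENK_SIZE - 1)

static void
fourk_slots(uint64_t start, uint64_t size, int *first, int *count)
{
	uint64_t offset_in_16k = start & SIXTEENK_MASK;

	*first = (int)(offset_in_16k / FOURK_SIZE);		/* slot 0..3 */
	*count = (size > SIXTEENK_SIZE) ? 4 : (int)(size / FOURK_SIZE);
	if (*first + *count > 4) {
		*count = 4 - *first;				/* clamp to this 16K page */
	}
}
/* e.g. start offset 0x3000, size 0x2000: first = 3, count = 1 */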
*/ pager_prot &= ~VM_PROT_WRITE; } - vm_object_lock(object); - pager = object->pager; - if (object->named && - pager != MEMORY_OBJECT_NULL && - object->copy_strategy != MEMORY_OBJECT_COPY_NONE) { - assert(object->pager_ready); - vm_object_mapping_wait(object, THREAD_UNINT); - vm_object_mapping_begin(object); + if (!is_submap && + object != VM_OBJECT_NULL && + object->named && + object->pager != MEMORY_OBJECT_NULL) { + vm_object_lock(object); + pager = object->pager; + if (object->named && + pager != MEMORY_OBJECT_NULL) { + assert(object->pager_ready); + vm_object_mapping_wait(object, THREAD_UNINT); + vm_object_mapping_begin(object); + vm_object_unlock(object); + + kr = memory_object_map(pager, pager_prot); + assert(kr == KERN_SUCCESS); + + vm_object_lock(object); + vm_object_mapping_end(object); + } vm_object_unlock(object); + } + if (!is_submap && + fourk_object != VM_OBJECT_NULL && + fourk_object->named && + fourk_object->pager != MEMORY_OBJECT_NULL) { + vm_object_lock(fourk_object); + pager = fourk_object->pager; + if (fourk_object->named && + pager != MEMORY_OBJECT_NULL) { + assert(fourk_object->pager_ready); + vm_object_mapping_wait(fourk_object, + THREAD_UNINT); + vm_object_mapping_begin(fourk_object); + vm_object_unlock(fourk_object); - kr = memory_object_map(pager, pager_prot); - assert(kr == KERN_SUCCESS); + kr = memory_object_map(pager, VM_PROT_READ); + assert(kr == KERN_SUCCESS); - vm_object_lock(object); - vm_object_mapping_end(object); + vm_object_lock(fourk_object); + vm_object_mapping_end(fourk_object); + } + vm_object_unlock(fourk_object); } - vm_object_unlock(object); + } + + assert(map_locked == TRUE); + + if (!keep_map_locked) { + vm_map_unlock(map); + map_locked = FALSE; } /* - * Perform the copy if requested + * We can't hold the map lock if we enter this block. */ - if (copy) { - vm_object_t new_object; - vm_object_offset_t new_offset; + if (result == KERN_SUCCESS) { + /* Wire down the new entry if the user + * requested all new map entries be wired. + */ + if ((map->wiring_required) || (superpage_size)) { + assert(!keep_map_locked); + pmap_empty = FALSE; /* pmap won't be empty */ + kr = vm_map_wire_kernel(map, start, end, + new_entry->protection, VM_KERN_MEMORY_MLOCK, + TRUE); + result = kr; + } - result = vm_object_copy_strategically(object, offset, size, - &new_object, &new_offset, - ©); + } + if (result != KERN_SUCCESS) { + if (new_mapping_established) { + /* + * We have to get rid of the new mappings since we + * won't make them available to the user. + * Try and do that atomically, to minimize the risk + * that someone else create new mappings that range. + */ + zap_new_map = vm_map_create(PMAP_NULL, + *address, + *address + size, + map->hdr.entries_pageable); + vm_map_set_page_shift(zap_new_map, + VM_MAP_PAGE_SHIFT(map)); + vm_map_disable_hole_optimization(zap_new_map); - if (result == KERN_MEMORY_RESTART_COPY) { - boolean_t success; - boolean_t src_needs_copy; + if (!map_locked) { + vm_map_lock(map); + map_locked = TRUE; + } + (void) vm_map_delete(map, *address, *address + size, + (VM_MAP_REMOVE_SAVE_ENTRIES | + VM_MAP_REMOVE_NO_MAP_ALIGN), + zap_new_map); + } + if (zap_old_map != VM_MAP_NULL && + zap_old_map->hdr.nentries != 0) { + vm_map_entry_t entry1, entry2; /* - * XXX - * We currently ignore src_needs_copy. - * This really is the issue of how to make - * MEMORY_OBJECT_COPY_SYMMETRIC safe for - * non-kernel users to use. Solution forthcoming. 
- * In the meantime, since we don't allow non-kernel - * memory managers to specify symmetric copy, - * we won't run into problems here. + * The new mapping failed. Attempt to restore + * the old mappings, saved in the "zap_old_map". */ - new_object = object; - new_offset = offset; - success = vm_object_copy_quickly(&new_object, - new_offset, size, - &src_needs_copy, - ©); - assert(success); - result = KERN_SUCCESS; - } - /* - * Throw away the reference to the - * original object, as it won't be mapped. - */ + if (!map_locked) { + vm_map_lock(map); + map_locked = TRUE; + } - vm_object_deallocate(object); + /* first check if the coast is still clear */ + start = vm_map_first_entry(zap_old_map)->vme_start; + end = vm_map_last_entry(zap_old_map)->vme_end; + if (vm_map_lookup_entry(map, start, &entry1) || + vm_map_lookup_entry(map, end, &entry2) || + entry1 != entry2) { + /* + * Part of that range has already been + * re-mapped: we can't restore the old + * mappings... + */ + vm_map_enter_restore_failures++; + } else { + /* + * Transfer the saved map entries from + * "zap_old_map" to the original "map", + * inserting them all after "entry1". + */ + for (entry2 = vm_map_first_entry(zap_old_map); + entry2 != vm_map_to_entry(zap_old_map); + entry2 = vm_map_first_entry(zap_old_map)) { + vm_map_size_t entry_size; - if (result != KERN_SUCCESS) - return result; + entry_size = (entry2->vme_end - + entry2->vme_start); + vm_map_store_entry_unlink(zap_old_map, + entry2); + zap_old_map->size -= entry_size; + vm_map_store_entry_link(map, entry1, entry2, + VM_MAP_KERNEL_FLAGS_NONE); + map->size += entry_size; + entry1 = entry2; + } + if (map->wiring_required) { + /* + * XXX TODO: we should rewire the + * old pages here... + */ + } + vm_map_enter_restore_successes++; + } + } + } - object = new_object; - offset = new_offset; + /* + * The caller is responsible for releasing the lock if it requested to + * keep the map locked. + */ + if (map_locked && !keep_map_locked) { + vm_map_unlock(map); + } + + /* + * Get rid of the "zap_maps" and all the map entries that + * they may still contain. + */ + if (zap_old_map != VM_MAP_NULL) { + vm_map_destroy(zap_old_map, VM_MAP_REMOVE_NO_PMAP_CLEANUP); + zap_old_map = VM_MAP_NULL; + } + if (zap_new_map != VM_MAP_NULL) { + vm_map_destroy(zap_new_map, VM_MAP_REMOVE_NO_PMAP_CLEANUP); + zap_new_map = VM_MAP_NULL; } - result = vm_map_enter(target_map, - &map_addr, map_size, - (vm_map_offset_t)mask, - flags, - object, offset, - copy, - cur_protection, max_protection, inheritance); - if (result != KERN_SUCCESS) - vm_object_deallocate(object); - *address = map_addr; return result; -} +#undef RETURN +} +#endif /* __arm64__ */ +/* + * Counters for the prefault optimization. 
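/*
 * Illustrative sketch (not from the patch itself): the zap_old_map handling
 * above is an undo log.  Entries displaced by the new mapping are parked in
 * a side map and are only spliced back if the new mapping cannot be
 * established.  A simplified user-space analogue of that save/restore
 * pattern, using a singly linked list instead of a vm_map:
 */
#include <stdbool.h>
#include <stddef.h>

struct map_item {
	struct map_item *next;
	/* ... payload ... */
};

struct item_list {
	struct map_item *head;
};

static void
item_push(struct item_list *l, struct map_item *e)
{
	e->next = l->head;
	l->head = e;
}

/* On failure, move every saved item back onto the live list. */
static void
commit_or_rollback(struct item_list *live, struct item_list *saved, bool ok)
{
	while (!ok && saved->head != NULL) {
		struct map_item *e = saved->head;
		saved->head = e->next;
		item_push(live, e);
	}
	/* on success the saved list is simply discarded by the caller */
}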
+ */ +int64_t vm_prefault_nb_pages = 0; +int64_t vm_prefault_nb_bailout = 0; +static kern_return_t +vm_map_enter_mem_object_helper( + vm_map_t target_map, + vm_map_offset_t *address, + vm_map_size_t initial_size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance, + upl_page_list_ptr_t page_list, + unsigned int page_list_count) +{ + vm_map_address_t map_addr; + vm_map_size_t map_size; + vm_object_t object; + vm_object_size_t size; + kern_return_t result; + boolean_t mask_cur_protection, mask_max_protection; + boolean_t kernel_prefault, try_prefault = (page_list_count != 0); + vm_map_offset_t offset_in_mapping = 0; +#if __arm64__ + boolean_t fourk = vmk_flags.vmkf_fourk; +#endif /* __arm64__ */ + + assertf(vmk_flags.__vmkf_unused == 0, "vmk_flags unused=0x%x\n", vmk_flags.__vmkf_unused); -kern_return_t -vm_map_enter_mem_object_control( - vm_map_t target_map, - vm_map_offset_t *address, - vm_map_size_t initial_size, - vm_map_offset_t mask, - int flags, - memory_object_control_t control, - vm_object_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) -{ - vm_map_address_t map_addr; - vm_map_size_t map_size; - vm_object_t object; - vm_object_size_t size; - kern_return_t result; - memory_object_t pager; - vm_prot_t pager_prot; - kern_return_t kr; + mask_cur_protection = cur_protection & VM_PROT_IS_MASK; + mask_max_protection = max_protection & VM_PROT_IS_MASK; + cur_protection &= ~VM_PROT_IS_MASK; + max_protection &= ~VM_PROT_IS_MASK; /* * Check arguments for validity @@ -2542,69 +3995,555 @@ vm_map_enter_mem_object_control( (cur_protection & ~VM_PROT_ALL) || (max_protection & ~VM_PROT_ALL) || (inheritance > VM_INHERIT_LAST_VALID) || - initial_size == 0) + (try_prefault && (copy || !page_list)) || + initial_size == 0) { return KERN_INVALID_ARGUMENT; - - map_addr = vm_map_trunc_page(*address); - map_size = vm_map_round_page(initial_size); - size = vm_object_round_page(initial_size); - - object = memory_object_control_to_vm_object(control); - - if (object == VM_OBJECT_NULL) - return KERN_INVALID_OBJECT; - - if (object == kernel_object) { - printf("Warning: Attempt to map kernel object" - " by a non-private kernel entity\n"); - return KERN_INVALID_OBJECT; } - vm_object_lock(object); - object->ref_count++; - vm_object_res_reference(object); +#if __arm64__ + if (fourk) { + map_addr = vm_map_trunc_page(*address, FOURK_PAGE_MASK); + map_size = vm_map_round_page(initial_size, FOURK_PAGE_MASK); + } else +#endif /* __arm64__ */ + { + map_addr = vm_map_trunc_page(*address, + VM_MAP_PAGE_MASK(target_map)); + map_size = vm_map_round_page(initial_size, + VM_MAP_PAGE_MASK(target_map)); + } + size = vm_object_round_page(initial_size); /* - * For "named" VM objects, let the pager know that the - * memory object is being mapped. Some pagers need to keep - * track of this, to know when they can reclaim the memory - * object, for example. - * VM calls memory_object_map() for each mapping (specifying - * the protection of each mapping) and calls - * memory_object_last_unmap() when all the mappings are gone. + * Find the vm object (if any) corresponding to this port. 
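/*
 * Illustrative sketch (not from the patch itself): map_addr and map_size
 * above come from the usual mask-based truncate/round helpers, where the
 * mask is (page_size - 1) for the map's page size (or FOURK_PAGE_MASK on
 * the 4K path).  Their effect, for any power-of-two page size:
 */
#include <stdint.h>

static inline uint64_t
trunc_to_page(uint64_t addr, uint64_t page_mask)	/* round down */
{
	return addr & ~page_mask;
}

static inline uint64_t
round_to_page(uint64_t addr, uint64_t page_mask)	/* round up */
{
	return (addr + page_mask) & ~page_mask;
}
/* with page_mask 0xFFF: trunc(0x12345) == 0x12000, round(0x12345) == 0x13000 */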
*/ - pager_prot = max_protection; - if (copy) { - pager_prot &= ~VM_PROT_WRITE; - } - pager = object->pager; - if (object->named && - pager != MEMORY_OBJECT_NULL && - object->copy_strategy != MEMORY_OBJECT_COPY_NONE) { - assert(object->pager_ready); - vm_object_mapping_wait(object, THREAD_UNINT); - vm_object_mapping_begin(object); - vm_object_unlock(object); + if (!IP_VALID(port)) { + object = VM_OBJECT_NULL; + offset = 0; + copy = FALSE; + } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) { + vm_named_entry_t named_entry; - kr = memory_object_map(pager, pager_prot); - assert(kr == KERN_SUCCESS); + named_entry = (vm_named_entry_t) ip_get_kobject(port); + + if (flags & (VM_FLAGS_RETURN_DATA_ADDR | + VM_FLAGS_RETURN_4K_DATA_ADDR)) { + offset += named_entry->data_offset; + } + + /* a few checks to make sure user is obeying rules */ + if (size == 0) { + if (offset >= named_entry->size) { + return KERN_INVALID_RIGHT; + } + size = named_entry->size - offset; + } + if (mask_max_protection) { + max_protection &= named_entry->protection; + } + if (mask_cur_protection) { + cur_protection &= named_entry->protection; + } + if ((named_entry->protection & max_protection) != + max_protection) { + return KERN_INVALID_RIGHT; + } + if ((named_entry->protection & cur_protection) != + cur_protection) { + return KERN_INVALID_RIGHT; + } + if (offset + size < offset) { + /* overflow */ + return KERN_INVALID_ARGUMENT; + } + if (named_entry->size < (offset + initial_size)) { + return KERN_INVALID_ARGUMENT; + } + + if (named_entry->is_copy) { + /* for a vm_map_copy, we can only map it whole */ + if ((size != named_entry->size) && + (vm_map_round_page(size, + VM_MAP_PAGE_MASK(target_map)) == + named_entry->size)) { + /* XXX FBDP use the rounded size... */ + size = vm_map_round_page( + size, + VM_MAP_PAGE_MASK(target_map)); + } + + if (!(flags & VM_FLAGS_ANYWHERE) && + (offset != 0 || + size != named_entry->size)) { + /* + * XXX for a mapping at a "fixed" address, + * we can't trim after mapping the whole + * memory entry, so reject a request for a + * partial mapping. + */ + return KERN_INVALID_ARGUMENT; + } + } + + /* the callers parameter offset is defined to be the */ + /* offset from beginning of named entry offset in object */ + offset = offset + named_entry->offset; + + if (!VM_MAP_PAGE_ALIGNED(size, + VM_MAP_PAGE_MASK(target_map))) { + /* + * Let's not map more than requested; + * vm_map_enter() will handle this "not map-aligned" + * case. + */ + map_size = size; + } + + named_entry_lock(named_entry); + if (named_entry->is_sub_map) { + vm_map_t submap; + + if (flags & (VM_FLAGS_RETURN_DATA_ADDR | + VM_FLAGS_RETURN_4K_DATA_ADDR)) { + panic("VM_FLAGS_RETURN_DATA_ADDR not expected for submap."); + } + + submap = named_entry->backing.map; + vm_map_lock(submap); + vm_map_reference(submap); + vm_map_unlock(submap); + named_entry_unlock(named_entry); + + vmk_flags.vmkf_submap = TRUE; + + result = vm_map_enter(target_map, + &map_addr, + map_size, + mask, + flags, + vmk_flags, + tag, + (vm_object_t)(uintptr_t) submap, + offset, + copy, + cur_protection, + max_protection, + inheritance); + if (result != KERN_SUCCESS) { + vm_map_deallocate(submap); + } else { + /* + * No need to lock "submap" just to check its + * "mapped" flag: that flag is never reset + * once it's been set and if we race, we'll + * just end up setting it twice, which is OK. 
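/*
 * Illustrative sketch (not from the patch itself): the named-entry
 * validation earlier in this hunk rejects ranges whose end wraps around.
 * For unsigned arithmetic, "offset + size < offset" is true exactly when
 * the addition overflowed; combined with the size check it guarantees the
 * requested window lies inside the memory entry:
 */
#include <stdbool.h>
#include <stdint.h>

static bool
range_fits(uint64_t offset, uint64_t size, uint64_t entry_size)
{
	if (offset + size < offset) {
		return false;			/* unsigned wrap-around */
	}
	return offset + size <= entry_size;	/* stays inside the entry */
}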
+ */ + if (submap->mapped_in_other_pmaps == FALSE && + vm_map_pmap(submap) != PMAP_NULL && + vm_map_pmap(submap) != + vm_map_pmap(target_map)) { + /* + * This submap is being mapped in a map + * that uses a different pmap. + * Set its "mapped_in_other_pmaps" flag + * to indicate that we now need to + * remove mappings from all pmaps rather + * than just the submap's pmap. + */ + vm_map_lock(submap); + submap->mapped_in_other_pmaps = TRUE; + vm_map_unlock(submap); + } + *address = map_addr; + } + return result; + } else if (named_entry->is_copy) { + kern_return_t kr; + vm_map_copy_t copy_map; + vm_map_entry_t copy_entry; + vm_map_offset_t copy_addr; + + if (flags & ~(VM_FLAGS_FIXED | + VM_FLAGS_ANYWHERE | + VM_FLAGS_OVERWRITE | + VM_FLAGS_RETURN_4K_DATA_ADDR | + VM_FLAGS_RETURN_DATA_ADDR | + VM_FLAGS_ALIAS_MASK)) { + named_entry_unlock(named_entry); + return KERN_INVALID_ARGUMENT; + } + + if (flags & (VM_FLAGS_RETURN_DATA_ADDR | + VM_FLAGS_RETURN_4K_DATA_ADDR)) { + offset_in_mapping = offset - vm_object_trunc_page(offset); + if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) { + offset_in_mapping &= ~((signed)(0xFFF)); + } + offset = vm_object_trunc_page(offset); + map_size = vm_object_round_page(offset + offset_in_mapping + initial_size) - offset; + } + + copy_map = named_entry->backing.copy; + assert(copy_map->type == VM_MAP_COPY_ENTRY_LIST); + if (copy_map->type != VM_MAP_COPY_ENTRY_LIST) { + /* unsupported type; should not happen */ + printf("vm_map_enter_mem_object: " + "memory_entry->backing.copy " + "unsupported type 0x%x\n", + copy_map->type); + named_entry_unlock(named_entry); + return KERN_INVALID_ARGUMENT; + } + + /* reserve a contiguous range */ + kr = vm_map_enter(target_map, + &map_addr, + /* map whole mem entry, trim later: */ + named_entry->size, + mask, + flags & (VM_FLAGS_ANYWHERE | + VM_FLAGS_OVERWRITE | + VM_FLAGS_RETURN_4K_DATA_ADDR | + VM_FLAGS_RETURN_DATA_ADDR), + vmk_flags, + tag, + VM_OBJECT_NULL, + 0, + FALSE, /* copy */ + cur_protection, + max_protection, + inheritance); + if (kr != KERN_SUCCESS) { + named_entry_unlock(named_entry); + return kr; + } + + copy_addr = map_addr; + + for (copy_entry = vm_map_copy_first_entry(copy_map); + copy_entry != vm_map_copy_to_entry(copy_map); + copy_entry = copy_entry->vme_next) { + int remap_flags; + vm_map_kernel_flags_t vmk_remap_flags; + vm_map_t copy_submap; + vm_object_t copy_object; + vm_map_size_t copy_size; + vm_object_offset_t copy_offset; + int copy_vm_alias; + + remap_flags = 0; + vmk_remap_flags = VM_MAP_KERNEL_FLAGS_NONE; + + copy_object = VME_OBJECT(copy_entry); + copy_offset = VME_OFFSET(copy_entry); + copy_size = (copy_entry->vme_end - + copy_entry->vme_start); + VM_GET_FLAGS_ALIAS(flags, copy_vm_alias); + if (copy_vm_alias == 0) { + /* + * Caller does not want a specific + * alias for this new mapping: use + * the alias of the original mapping. + */ + copy_vm_alias = VME_ALIAS(copy_entry); + } + + /* sanity check */ + if ((copy_addr + copy_size) > + (map_addr + + named_entry->size /* XXX full size */)) { + /* over-mapping too much !? 
*/ + kr = KERN_INVALID_ARGUMENT; + /* abort */ + break; + } + + /* take a reference on the object */ + if (copy_entry->is_sub_map) { + vmk_remap_flags.vmkf_submap = TRUE; + copy_submap = VME_SUBMAP(copy_entry); + vm_map_lock(copy_submap); + vm_map_reference(copy_submap); + vm_map_unlock(copy_submap); + copy_object = (vm_object_t)(uintptr_t) copy_submap; + } else if (!copy && + copy_object != VM_OBJECT_NULL && + (copy_entry->needs_copy || + copy_object->shadowed || + (!copy_object->true_share && + !copy_entry->is_shared && + copy_object->vo_size > copy_size))) { + /* + * We need to resolve our side of this + * "symmetric" copy-on-write now; we + * need a new object to map and share, + * instead of the current one which + * might still be shared with the + * original mapping. + * + * Note: A "vm_map_copy_t" does not + * have a lock but we're protected by + * the named entry's lock here. + */ + // assert(copy_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC); + VME_OBJECT_SHADOW(copy_entry, copy_size); + if (!copy_entry->needs_copy && + copy_entry->protection & VM_PROT_WRITE) { + vm_prot_t prot; + + prot = copy_entry->protection & ~VM_PROT_WRITE; + vm_object_pmap_protect(copy_object, + copy_offset, + copy_size, + PMAP_NULL, + 0, + prot); + } + + copy_entry->needs_copy = FALSE; + copy_entry->is_shared = TRUE; + copy_object = VME_OBJECT(copy_entry); + copy_offset = VME_OFFSET(copy_entry); + vm_object_lock(copy_object); + vm_object_reference_locked(copy_object); + if (copy_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { + /* we're about to make a shared mapping of this object */ + copy_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; + copy_object->true_share = TRUE; + } + vm_object_unlock(copy_object); + } else { + /* + * We already have the right object + * to map. + */ + copy_object = VME_OBJECT(copy_entry); + vm_object_reference(copy_object); + } + + /* over-map the object into destination */ + remap_flags |= flags; + remap_flags |= VM_FLAGS_FIXED; + remap_flags |= VM_FLAGS_OVERWRITE; + remap_flags &= ~VM_FLAGS_ANYWHERE; + if (!copy && !copy_entry->is_sub_map) { + /* + * copy-on-write should have been + * resolved at this point, or we would + * end up sharing instead of copying. + */ + assert(!copy_entry->needs_copy); + } +#if !CONFIG_EMBEDDED + if (copy_entry->used_for_jit) { + vmk_remap_flags.vmkf_map_jit = TRUE; + } +#endif /* !CONFIG_EMBEDDED */ + kr = vm_map_enter(target_map, + ©_addr, + copy_size, + (vm_map_offset_t) 0, + remap_flags, + vmk_remap_flags, + copy_vm_alias, + copy_object, + copy_offset, + ((copy_object == NULL) ? FALSE : copy), + cur_protection, + max_protection, + inheritance); + if (kr != KERN_SUCCESS) { + if (copy_entry->is_sub_map) { + vm_map_deallocate(copy_submap); + } else { + vm_object_deallocate(copy_object); + } + /* abort */ + break; + } + + /* next mapping */ + copy_addr += copy_size; + } + + if (kr == KERN_SUCCESS) { + if (flags & (VM_FLAGS_RETURN_DATA_ADDR | + VM_FLAGS_RETURN_4K_DATA_ADDR)) { + *address = map_addr + offset_in_mapping; + } else { + *address = map_addr; + } + + if (offset) { + /* + * Trim in front, from 0 to "offset". + */ + vm_map_remove(target_map, + map_addr, + map_addr + offset, + VM_MAP_REMOVE_NO_FLAGS); + *address += offset; + } + if (offset + map_size < named_entry->size) { + /* + * Trim in back, from + * "offset + map_size" to + * "named_entry->size". 
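/*
 * Illustrative sketch (not from the patch itself): when a vm_map_copy backed
 * memory entry is mapped whole and then trimmed, the two ranges handed to
 * vm_map_remove() are the slices before and after the window the caller
 * asked for.  The arithmetic, spelled out:
 */
#include <stdint.h>

struct trim_ranges {
	uint64_t front_start, front_end;	/* removed when offset != 0 */
	uint64_t back_start, back_end;		/* removed when non-empty */
};

static struct trim_ranges
compute_trims(uint64_t map_addr, uint64_t offset,
    uint64_t map_size, uint64_t entry_size)
{
	struct trim_ranges t;

	t.front_start = map_addr;
	t.front_end = map_addr + offset;
	t.back_start = map_addr + offset + map_size;
	t.back_end = map_addr + entry_size;
	return t;
}
/* the surviving mapping is [map_addr + offset, map_addr + offset + map_size) */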
+ */ + vm_map_remove(target_map, + (map_addr + + offset + map_size), + (map_addr + + named_entry->size), + VM_MAP_REMOVE_NO_FLAGS); + } + } + named_entry_unlock(named_entry); + + if (kr != KERN_SUCCESS) { + if (!(flags & VM_FLAGS_OVERWRITE)) { + /* deallocate the contiguous range */ + (void) vm_deallocate(target_map, + map_addr, + map_size); + } + } + + return kr; + } else { + unsigned int access; + vm_prot_t protections; + unsigned int wimg_mode; + + /* we are mapping a VM object */ + + protections = named_entry->protection & VM_PROT_ALL; + access = GET_MAP_MEM(named_entry->protection); + + if (flags & (VM_FLAGS_RETURN_DATA_ADDR | + VM_FLAGS_RETURN_4K_DATA_ADDR)) { + offset_in_mapping = offset - vm_object_trunc_page(offset); + if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) { + offset_in_mapping &= ~((signed)(0xFFF)); + } + offset = vm_object_trunc_page(offset); + map_size = vm_object_round_page(offset + offset_in_mapping + initial_size) - offset; + } + + object = named_entry->backing.object; + assert(object != VM_OBJECT_NULL); + vm_object_lock(object); + named_entry_unlock(named_entry); + + vm_object_reference_locked(object); + + wimg_mode = object->wimg_bits; + vm_prot_to_wimg(access, &wimg_mode); + if (object->wimg_bits != wimg_mode) { + vm_object_change_wimg_mode(object, wimg_mode); + } + + vm_object_unlock(object); + } + } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) { + /* + * JMM - This is temporary until we unify named entries + * and raw memory objects. + * + * Detected fake ip_kotype for a memory object. In + * this case, the port isn't really a port at all, but + * instead is just a raw memory object. + */ + if (flags & (VM_FLAGS_RETURN_DATA_ADDR | + VM_FLAGS_RETURN_4K_DATA_ADDR)) { + panic("VM_FLAGS_RETURN_DATA_ADDR not expected for raw memory object."); + } + + object = memory_object_to_vm_object((memory_object_t)port); + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_OBJECT; + } + vm_object_reference(object); + + /* wait for object (if any) to be ready */ + if (object != VM_OBJECT_NULL) { + if (object == kernel_object) { + printf("Warning: Attempt to map kernel object" + " by a non-private kernel entity\n"); + return KERN_INVALID_OBJECT; + } + if (!object->pager_ready) { + vm_object_lock(object); + + while (!object->pager_ready) { + vm_object_wait(object, + VM_OBJECT_EVENT_PAGER_READY, + THREAD_UNINT); + vm_object_lock(object); + } + vm_object_unlock(object); + } + } + } else { + return KERN_INVALID_OBJECT; + } + + if (object != VM_OBJECT_NULL && + object->named && + object->pager != MEMORY_OBJECT_NULL && + object->copy_strategy != MEMORY_OBJECT_COPY_NONE) { + memory_object_t pager; + vm_prot_t pager_prot; + kern_return_t kr; + /* + * For "named" VM objects, let the pager know that the + * memory object is being mapped. Some pagers need to keep + * track of this, to know when they can reclaim the memory + * object, for example. + * VM calls memory_object_map() for each mapping (specifying + * the protection of each mapping) and calls + * memory_object_last_unmap() when all the mappings are gone. + */ + pager_prot = max_protection; + if (copy) { + /* + * Copy-On-Write mapping: won't modify the + * memory object. 
+ */ + pager_prot &= ~VM_PROT_WRITE; + } vm_object_lock(object); - vm_object_mapping_end(object); + pager = object->pager; + if (object->named && + pager != MEMORY_OBJECT_NULL && + object->copy_strategy != MEMORY_OBJECT_COPY_NONE) { + assert(object->pager_ready); + vm_object_mapping_wait(object, THREAD_UNINT); + vm_object_mapping_begin(object); + vm_object_unlock(object); + + kr = memory_object_map(pager, pager_prot); + assert(kr == KERN_SUCCESS); + + vm_object_lock(object); + vm_object_mapping_end(object); + } + vm_object_unlock(object); } - vm_object_unlock(object); /* * Perform the copy if requested */ if (copy) { - vm_object_t new_object; - vm_object_offset_t new_offset; + vm_object_t new_object; + vm_object_offset_t new_offset; - result = vm_object_copy_strategically(object, offset, size, - &new_object, &new_offset, - ©); + result = vm_object_copy_strategically(object, offset, + map_size, + &new_object, &new_offset, + ©); if (result == KERN_MEMORY_RESTART_COPY) { @@ -2624,9 +4563,10 @@ vm_map_enter_mem_object_control( new_object = object; new_offset = offset; success = vm_object_copy_quickly(&new_object, - new_offset, size, - &src_needs_copy, - ©); + new_offset, + map_size, + &src_needs_copy, + ©); assert(success); result = KERN_SUCCESS; } @@ -2637,32 +4577,396 @@ vm_map_enter_mem_object_control( vm_object_deallocate(object); - if (result != KERN_SUCCESS) + if (result != KERN_SUCCESS) { return result; + } object = new_object; offset = new_offset; } - result = vm_map_enter(target_map, - &map_addr, map_size, - (vm_map_offset_t)mask, - flags, - object, offset, - copy, - cur_protection, max_protection, inheritance); - if (result != KERN_SUCCESS) + /* + * If non-kernel users want to try to prefault pages, the mapping and prefault + * needs to be atomic. + */ + kernel_prefault = (try_prefault && vm_kernel_map_is_kernel(target_map)); + vmk_flags.vmkf_keep_map_locked = (try_prefault && !kernel_prefault); + +#if __arm64__ + if (fourk) { + /* map this object in a "4K" pager */ + result = vm_map_enter_fourk(target_map, + &map_addr, + map_size, + (vm_map_offset_t) mask, + flags, + vmk_flags, + tag, + object, + offset, + copy, + cur_protection, + max_protection, + inheritance); + } else +#endif /* __arm64__ */ + { + result = vm_map_enter(target_map, + &map_addr, map_size, + (vm_map_offset_t)mask, + flags, + vmk_flags, + tag, + object, offset, + copy, + cur_protection, max_protection, + inheritance); + } + if (result != KERN_SUCCESS) { vm_object_deallocate(object); - *address = map_addr; - - return result; -} - + } -#if VM_CPM + /* + * Try to prefault, and do not forget to release the vm map lock. + */ + if (result == KERN_SUCCESS && try_prefault) { + mach_vm_address_t va = map_addr; + kern_return_t kr = KERN_SUCCESS; + unsigned int i = 0; + int pmap_options; + + pmap_options = kernel_prefault ? 0 : PMAP_OPTIONS_NOWAIT; + if (object->internal) { + pmap_options |= PMAP_OPTIONS_INTERNAL; + } -#ifdef MACH_ASSERT -extern pmap_paddr_t avail_start, avail_end; + for (i = 0; i < page_list_count; ++i) { + if (!UPL_VALID_PAGE(page_list, i)) { + if (kernel_prefault) { + assertf(FALSE, "kernel_prefault && !UPL_VALID_PAGE"); + result = KERN_MEMORY_ERROR; + break; + } + } else { + /* + * If this function call failed, we should stop + * trying to optimize, other calls are likely + * going to fail too. + * + * We are not gonna report an error for such + * failure though. That's an optimization, not + * something critical. 
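/*
 * Illustrative sketch (not from the patch itself): this prefault loop is a
 * best-effort optimization.  For user maps each page is entered with
 * PMAP_OPTIONS_NOWAIT and the first failure only stops further prefaulting;
 * the mapping itself still succeeds and the pages fault in lazily later.
 * Only a kernel-internal prefault treats a failure as fatal.  The control
 * flow, reduced to its shape:
 */
#include <stdbool.h>

static int
prefault_pages(int npages, bool must_succeed, bool (*enter_one)(int idx))
{
	for (int i = 0; i < npages; i++) {
		if (!enter_one(i)) {
			if (must_succeed) {
				return -1;	/* kernel prefault: hard error */
			}
			break;			/* user prefault: stop optimizing */
		}
	}
	return 0;
}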
+ */ + kr = pmap_enter_options(target_map->pmap, + va, UPL_PHYS_PAGE(page_list, i), + cur_protection, VM_PROT_NONE, + 0, TRUE, pmap_options, NULL); + if (kr != KERN_SUCCESS) { + OSIncrementAtomic64(&vm_prefault_nb_bailout); + if (kernel_prefault) { + result = kr; + } + break; + } + OSIncrementAtomic64(&vm_prefault_nb_pages); + } + + /* Next virtual address */ + va += PAGE_SIZE; + } + if (vmk_flags.vmkf_keep_map_locked) { + vm_map_unlock(target_map); + } + } + + if (flags & (VM_FLAGS_RETURN_DATA_ADDR | + VM_FLAGS_RETURN_4K_DATA_ADDR)) { + *address = map_addr + offset_in_mapping; + } else { + *address = map_addr; + } + return result; +} + +kern_return_t +vm_map_enter_mem_object( + vm_map_t target_map, + vm_map_offset_t *address, + vm_map_size_t initial_size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) +{ + kern_return_t ret; + + ret = vm_map_enter_mem_object_helper(target_map, + address, + initial_size, + mask, + flags, + vmk_flags, + tag, + port, + offset, + copy, + cur_protection, + max_protection, + inheritance, + NULL, + 0); + +#if KASAN + if (ret == KERN_SUCCESS && address && target_map->pmap == kernel_pmap) { + kasan_notify_address(*address, initial_size); + } +#endif + + return ret; +} + +kern_return_t +vm_map_enter_mem_object_prefault( + vm_map_t target_map, + vm_map_offset_t *address, + vm_map_size_t initial_size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + vm_prot_t cur_protection, + vm_prot_t max_protection, + upl_page_list_ptr_t page_list, + unsigned int page_list_count) +{ + kern_return_t ret; + + ret = vm_map_enter_mem_object_helper(target_map, + address, + initial_size, + mask, + flags, + vmk_flags, + tag, + port, + offset, + FALSE, + cur_protection, + max_protection, + VM_INHERIT_DEFAULT, + page_list, + page_list_count); + +#if KASAN + if (ret == KERN_SUCCESS && address && target_map->pmap == kernel_pmap) { + kasan_notify_address(*address, initial_size); + } +#endif + + return ret; +} + + +kern_return_t +vm_map_enter_mem_object_control( + vm_map_t target_map, + vm_map_offset_t *address, + vm_map_size_t initial_size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + memory_object_control_t control, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) +{ + vm_map_address_t map_addr; + vm_map_size_t map_size; + vm_object_t object; + vm_object_size_t size; + kern_return_t result; + memory_object_t pager; + vm_prot_t pager_prot; + kern_return_t kr; +#if __arm64__ + boolean_t fourk = vmk_flags.vmkf_fourk; +#endif /* __arm64__ */ + + /* + * Check arguments for validity + */ + if ((target_map == VM_MAP_NULL) || + (cur_protection & ~VM_PROT_ALL) || + (max_protection & ~VM_PROT_ALL) || + (inheritance > VM_INHERIT_LAST_VALID) || + initial_size == 0) { + return KERN_INVALID_ARGUMENT; + } + +#if __arm64__ + if (fourk) { + map_addr = vm_map_trunc_page(*address, + FOURK_PAGE_MASK); + map_size = vm_map_round_page(initial_size, + FOURK_PAGE_MASK); + } else +#endif /* __arm64__ */ + { + map_addr = vm_map_trunc_page(*address, + VM_MAP_PAGE_MASK(target_map)); + map_size = vm_map_round_page(initial_size, + VM_MAP_PAGE_MASK(target_map)); + } + size = vm_object_round_page(initial_size); + + object = 
memory_object_control_to_vm_object(control); + + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_OBJECT; + } + + if (object == kernel_object) { + printf("Warning: Attempt to map kernel object" + " by a non-private kernel entity\n"); + return KERN_INVALID_OBJECT; + } + + vm_object_lock(object); + object->ref_count++; + vm_object_res_reference(object); + + /* + * For "named" VM objects, let the pager know that the + * memory object is being mapped. Some pagers need to keep + * track of this, to know when they can reclaim the memory + * object, for example. + * VM calls memory_object_map() for each mapping (specifying + * the protection of each mapping) and calls + * memory_object_last_unmap() when all the mappings are gone. + */ + pager_prot = max_protection; + if (copy) { + pager_prot &= ~VM_PROT_WRITE; + } + pager = object->pager; + if (object->named && + pager != MEMORY_OBJECT_NULL && + object->copy_strategy != MEMORY_OBJECT_COPY_NONE) { + assert(object->pager_ready); + vm_object_mapping_wait(object, THREAD_UNINT); + vm_object_mapping_begin(object); + vm_object_unlock(object); + + kr = memory_object_map(pager, pager_prot); + assert(kr == KERN_SUCCESS); + + vm_object_lock(object); + vm_object_mapping_end(object); + } + vm_object_unlock(object); + + /* + * Perform the copy if requested + */ + + if (copy) { + vm_object_t new_object; + vm_object_offset_t new_offset; + + result = vm_object_copy_strategically(object, offset, size, + &new_object, &new_offset, + ©); + + + if (result == KERN_MEMORY_RESTART_COPY) { + boolean_t success; + boolean_t src_needs_copy; + + /* + * XXX + * We currently ignore src_needs_copy. + * This really is the issue of how to make + * MEMORY_OBJECT_COPY_SYMMETRIC safe for + * non-kernel users to use. Solution forthcoming. + * In the meantime, since we don't allow non-kernel + * memory managers to specify symmetric copy, + * we won't run into problems here. + */ + new_object = object; + new_offset = offset; + success = vm_object_copy_quickly(&new_object, + new_offset, size, + &src_needs_copy, + ©); + assert(success); + result = KERN_SUCCESS; + } + /* + * Throw away the reference to the + * original object, as it won't be mapped. 
+ */ + + vm_object_deallocate(object); + + if (result != KERN_SUCCESS) { + return result; + } + + object = new_object; + offset = new_offset; + } + +#if __arm64__ + if (fourk) { + result = vm_map_enter_fourk(target_map, + &map_addr, + map_size, + (vm_map_offset_t)mask, + flags, + vmk_flags, + tag, + object, offset, + copy, + cur_protection, max_protection, + inheritance); + } else +#endif /* __arm64__ */ + { + result = vm_map_enter(target_map, + &map_addr, map_size, + (vm_map_offset_t)mask, + flags, + vmk_flags, + tag, + object, offset, + copy, + cur_protection, max_protection, + inheritance); + } + if (result != KERN_SUCCESS) { + vm_object_deallocate(object); + } + *address = map_addr; + + return result; +} + + +#if VM_CPM + +#ifdef MACH_ASSERT +extern pmap_paddr_t avail_start, avail_end; #endif /* @@ -2677,50 +4981,55 @@ extern pmap_paddr_t avail_start, avail_end; */ kern_return_t vm_map_enter_cpm( - vm_map_t map, - vm_map_offset_t *addr, - vm_map_size_t size, - int flags) -{ - vm_object_t cpm_obj; - pmap_t pmap; - vm_page_t m, pages; - kern_return_t kr; - vm_map_offset_t va, start, end, offset; -#if MACH_ASSERT - vm_map_offset_t prev_addr; -#endif /* MACH_ASSERT */ - - boolean_t anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0); - - if (!vm_allocate_cpm_enabled) - return KERN_FAILURE; + vm_map_t map, + vm_map_offset_t *addr, + vm_map_size_t size, + int flags) +{ + vm_object_t cpm_obj; + pmap_t pmap; + vm_page_t m, pages; + kern_return_t kr; + vm_map_offset_t va, start, end, offset; +#if MACH_ASSERT + vm_map_offset_t prev_addr = 0; +#endif /* MACH_ASSERT */ + + boolean_t anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0); + vm_tag_t tag; + + VM_GET_FLAGS_ALIAS(flags, tag); if (size == 0) { *addr = 0; return KERN_SUCCESS; } - if (anywhere) + if (anywhere) { *addr = vm_map_min(map); - else - *addr = vm_map_trunc_page(*addr); - size = vm_map_round_page(size); + } else { + *addr = vm_map_trunc_page(*addr, + VM_MAP_PAGE_MASK(map)); + } + size = vm_map_round_page(size, + VM_MAP_PAGE_MASK(map)); /* * LP64todo - cpm_allocate should probably allow * allocations of >4GB, but not with the current * algorithm, so just cast down the size for now. */ - if (size > VM_MAX_ADDRESS) + if (size > VM_MAX_ADDRESS) { return KERN_RESOURCE_SHORTAGE; + } if ((kr = cpm_allocate(CAST_DOWN(vm_size_t, size), - &pages, 0, 0, TRUE, flags)) != KERN_SUCCESS) + &pages, 0, 0, TRUE, flags)) != KERN_SUCCESS) { return kr; + } cpm_obj = vm_object_allocate((vm_object_size_t)size); assert(cpm_obj != VM_OBJECT_NULL); assert(cpm_obj->internal); - assert(cpm_obj->size == (vm_object_size_t)size); + assert(cpm_obj->vo_size == (vm_object_size_t)size); assert(cpm_obj->can_persist == FALSE); assert(cpm_obj->pager_created == FALSE); assert(cpm_obj->pageout == FALSE); @@ -2736,22 +5045,15 @@ vm_map_enter_cpm( pages = NEXT_PAGE(m); *(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL; - assert(!m->gobbled); - assert(!m->wanted); - assert(!m->pageout); - assert(!m->tabled); + assert(!m->vmp_gobbled); + assert(!m->vmp_wanted); + assert(!m->vmp_pageout); + assert(!m->vmp_tabled); assert(VM_PAGE_WIRED(m)); - /* - * ENCRYPTED SWAP: - * "m" is not supposed to be pageable, so it - * should not be encrypted. It wouldn't be safe - * to enter it in a new VM object while encrypted. 
- */ - ASSERT_PAGE_DECRYPTED(m); - assert(m->busy); - assert(m->phys_page>=(avail_start>>PAGE_SHIFT) && m->phys_page<=(avail_end>>PAGE_SHIFT)); + assert(m->vmp_busy); + assert(VM_PAGE_GET_PHYS_PAGE(m) >= (avail_start >> PAGE_SHIFT) && VM_PAGE_GET_PHYS_PAGE(m) <= (avail_end >> PAGE_SHIFT)); - m->busy = FALSE; + m->vmp_busy = FALSE; vm_page_insert(m, cpm_obj, offset); } assert(cpm_obj->resident_page_count == size / PAGE_SIZE); @@ -2780,6 +5082,7 @@ vm_map_enter_cpm( size, (vm_map_offset_t)0, flags, + VM_MAP_KERNEL_FLAGS_NONE, cpm_obj, (vm_object_offset_t)0, FALSE, @@ -2818,8 +5121,8 @@ vm_map_enter_cpm( * again. */ for (offset = 0, va = start; offset < size; - va += PAGE_SIZE, offset += PAGE_SIZE) { - int type_of_fault; + va += PAGE_SIZE, offset += PAGE_SIZE) { + int type_of_fault; vm_object_lock(cpm_obj); m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset); @@ -2830,13 +5133,20 @@ vm_map_enter_cpm( type_of_fault = DBG_ZERO_FILL_FAULT; vm_fault_enter(m, pmap, va, VM_PROT_ALL, VM_PROT_WRITE, - VM_PAGE_WIRED(m), FALSE, FALSE, FALSE, - &type_of_fault); + VM_PAGE_WIRED(m), + FALSE, /* change_wiring */ + VM_KERN_MEMORY_NONE, /* tag - not wiring */ + FALSE, /* no_cache */ + FALSE, /* cs_bypass */ + 0, /* user_tag */ + 0, /* pmap_options */ + NULL, /* need_retry */ + &type_of_fault); vm_object_unlock(cpm_obj); } -#if MACH_ASSERT +#if MACH_ASSERT /* * Verify ordering in address space. */ @@ -2844,32 +5154,33 @@ vm_map_enter_cpm( vm_object_lock(cpm_obj); m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset); vm_object_unlock(cpm_obj); - if (m == VM_PAGE_NULL) - panic("vm_allocate_cpm: obj 0x%x off 0x%x no page", - cpm_obj, offset); - assert(m->tabled); - assert(!m->busy); - assert(!m->wanted); - assert(!m->fictitious); - assert(!m->private); - assert(!m->absent); - assert(!m->error); - assert(!m->cleaning); - assert(!m->precious); - assert(!m->clustered); + if (m == VM_PAGE_NULL) { + panic("vm_allocate_cpm: obj %p off 0x%llx no page", + cpm_obj, (uint64_t)offset); + } + assert(m->vmp_tabled); + assert(!m->vmp_busy); + assert(!m->vmp_wanted); + assert(!m->vmp_fictitious); + assert(!m->vmp_private); + assert(!m->vmp_absent); + assert(!m->vmp_error); + assert(!m->vmp_cleaning); + assert(!m->vmp_laundry); + assert(!m->vmp_precious); + assert(!m->vmp_clustered); if (offset != 0) { - if (m->phys_page != prev_addr + 1) { - printf("start 0x%x end 0x%x va 0x%x\n", - start, end, va); - printf("obj 0x%x off 0x%x\n", cpm_obj, offset); - printf("m 0x%x prev_address 0x%x\n", m, - prev_addr); + if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) { + printf("start 0x%llx end 0x%llx va 0x%llx\n", + (uint64_t)start, (uint64_t)end, (uint64_t)va); + printf("obj %p off 0x%llx\n", cpm_obj, (uint64_t)offset); + printf("m %p prev_address 0x%llx\n", m, (uint64_t)prev_addr); panic("vm_allocate_cpm: pages not contig!"); } } - prev_addr = m->phys_page; + prev_addr = VM_PAGE_GET_PHYS_PAGE(m); } -#endif /* MACH_ASSERT */ +#endif /* MACH_ASSERT */ vm_object_deallocate(cpm_obj); /* kill extra ref */ @@ -2877,7 +5188,7 @@ vm_map_enter_cpm( } -#else /* VM_CPM */ +#else /* VM_CPM */ /* * Interface is defined in all cases, but unless the kernel @@ -2887,10 +5198,10 @@ vm_map_enter_cpm( kern_return_t vm_map_enter_cpm( - __unused vm_map_t map, - __unused vm_map_offset_t *addr, - __unused vm_map_size_t size, - __unused int flags) + __unused vm_map_t map, + __unused vm_map_offset_t *addr, + __unused vm_map_size_t size, + __unused int flags) { return KERN_FAILURE; } @@ -2905,16 +5216,17 @@ vm_map_enter_cpm( static void vm_map_clip_unnest( 
- vm_map_t map, - vm_map_entry_t entry, - vm_map_offset_t start_unnest, - vm_map_offset_t end_unnest) + vm_map_t map, + vm_map_entry_t entry, + vm_map_offset_t start_unnest, + vm_map_offset_t end_unnest) { vm_map_offset_t old_start_unnest = start_unnest; vm_map_offset_t old_end_unnest = end_unnest; assert(entry->is_sub_map); - assert(entry->object.sub_map != NULL); + assert(VME_SUBMAP(entry) != NULL); + assert(entry->use_pmap); /* * Query the platform for the optimal unnest range. @@ -2925,44 +5237,64 @@ vm_map_clip_unnest( * depending on size/alignment. */ if (pmap_adjust_unnest_parameters(map->pmap, &start_unnest, &end_unnest)) { - log_unnest_badness(map, old_start_unnest, old_end_unnest); + assert(VME_SUBMAP(entry)->is_nested_map); + assert(!VME_SUBMAP(entry)->disable_vmentry_reuse); + log_unnest_badness(map, + old_start_unnest, + old_end_unnest, + VME_SUBMAP(entry)->is_nested_map, + (entry->vme_start + + VME_SUBMAP(entry)->lowest_unnestable_start - + VME_OFFSET(entry))); } if (entry->vme_start > start_unnest || entry->vme_end < end_unnest) { panic("vm_map_clip_unnest(0x%llx,0x%llx): " - "bad nested entry: start=0x%llx end=0x%llx\n", - (long long)start_unnest, (long long)end_unnest, - (long long)entry->vme_start, (long long)entry->vme_end); + "bad nested entry: start=0x%llx end=0x%llx\n", + (long long)start_unnest, (long long)end_unnest, + (long long)entry->vme_start, (long long)entry->vme_end); } if (start_unnest > entry->vme_start) { _vm_map_clip_start(&map->hdr, - entry, - start_unnest); - vm_map_store_update_first_free(map, map->first_free); + entry, + start_unnest); + if (map->holelistenabled) { + vm_map_store_update_first_free(map, NULL, FALSE); + } else { + vm_map_store_update_first_free(map, map->first_free, FALSE); + } } if (entry->vme_end > end_unnest) { _vm_map_clip_end(&map->hdr, - entry, - end_unnest); - vm_map_store_update_first_free(map, map->first_free); + entry, + end_unnest); + if (map->holelistenabled) { + vm_map_store_update_first_free(map, NULL, FALSE); + } else { + vm_map_store_update_first_free(map, map->first_free, FALSE); + } } pmap_unnest(map->pmap, - entry->vme_start, - entry->vme_end - entry->vme_start); - if ((map->mapped) && (map->ref_count)) { + entry->vme_start, + entry->vme_end - entry->vme_start); + if ((map->mapped_in_other_pmaps) && os_ref_get_count(&map->map_refcnt) != 0) { /* clean up parent map/maps */ vm_map_submap_pmap_clean( map, entry->vme_start, entry->vme_end, - entry->object.sub_map, - entry->offset); + VME_SUBMAP(entry), + VME_OFFSET(entry)); } entry->use_pmap = FALSE; + if ((map->pmap != kernel_pmap) && + (VME_ALIAS(entry) == VM_MEMORY_SHARED_PMAP)) { + VME_ALIAS_SET(entry, VM_MEMORY_UNSHARED_PMAP); + } } -#endif /* NO_NESTED_PMAP */ +#endif /* NO_NESTED_PMAP */ /* * vm_map_clip_start: [ internal use only ] @@ -2971,16 +5303,17 @@ vm_map_clip_unnest( * the specified address; if necessary, * it splits the entry into two. 
*/ -static void +void vm_map_clip_start( - vm_map_t map, - vm_map_entry_t entry, - vm_map_offset_t startaddr) + vm_map_t map, + vm_map_entry_t entry, + vm_map_offset_t startaddr) { #ifndef NO_NESTED_PMAP - if (entry->use_pmap && + if (entry->is_sub_map && + entry->use_pmap && startaddr >= entry->vme_start) { - vm_map_offset_t start_unnest, end_unnest; + vm_map_offset_t start_unnest, end_unnest; /* * Make sure "startaddr" is no longer in a nested range @@ -2995,15 +5328,31 @@ vm_map_clip_start( } #endif /* NO_NESTED_PMAP */ if (startaddr > entry->vme_start) { - if (entry->object.vm_object && + if (VME_OBJECT(entry) && !entry->is_sub_map && - entry->object.vm_object->phys_contiguous) { + VME_OBJECT(entry)->phys_contiguous) { pmap_remove(map->pmap, - (addr64_t)(entry->vme_start), - (addr64_t)(entry->vme_end)); + (addr64_t)(entry->vme_start), + (addr64_t)(entry->vme_end)); + } + if (entry->vme_atomic) { + panic("Attempting to clip an atomic VM entry! (map: %p, entry: %p)\n", map, entry); } + + DTRACE_VM5( + vm_map_clip_start, + vm_map_t, map, + vm_map_offset_t, entry->vme_start, + vm_map_offset_t, entry->vme_end, + vm_map_offset_t, startaddr, + int, VME_ALIAS(entry)); + _vm_map_clip_start(&map->hdr, entry, startaddr); - vm_map_store_update_first_free(map, map->first_free); + if (map->holelistenabled) { + vm_map_store_update_first_free(map, NULL, FALSE); + } else { + vm_map_store_update_first_free(map, map->first_free, FALSE); + } } } @@ -3011,7 +5360,7 @@ vm_map_clip_start( #define vm_map_copy_clip_start(copy, entry, startaddr) \ MACRO_BEGIN \ if ((startaddr) > (entry)->vme_start) \ - _vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \ + _vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \ MACRO_END /* @@ -3020,11 +5369,11 @@ vm_map_clip_start( */ static void _vm_map_clip_start( - register struct vm_map_header *map_header, - register vm_map_entry_t entry, - register vm_map_offset_t start) + struct vm_map_header *map_header, + vm_map_entry_t entry, + vm_map_offset_t start) { - register vm_map_entry_t new_entry; + vm_map_entry_t new_entry; /* * Split off the front portion -- @@ -3034,19 +5383,27 @@ _vm_map_clip_start( * address. */ - new_entry = _vm_map_entry_create(map_header); + if (entry->map_aligned) { + assert(VM_MAP_PAGE_ALIGNED(start, + VM_MAP_HDR_PAGE_MASK(map_header))); + } + + new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable); vm_map_entry_copy_full(new_entry, entry); new_entry->vme_end = start; - entry->offset += (start - entry->vme_start); + assert(new_entry->vme_start < new_entry->vme_end); + VME_OFFSET_SET(entry, VME_OFFSET(entry) + (start - entry->vme_start)); + assert(start < entry->vme_end); entry->vme_start = start; _vm_map_store_entry_link(map_header, entry->vme_prev, new_entry); - if (entry->is_sub_map) - vm_map_reference(new_entry->object.sub_map); - else - vm_object_reference(new_entry->object.vm_object); + if (entry->is_sub_map) { + vm_map_reference(VME_SUBMAP(new_entry)); + } else { + vm_object_reference(VME_OBJECT(new_entry)); + } } @@ -3057,11 +5414,11 @@ _vm_map_clip_start( * the specified address; if necessary, * it splits the entry into two. 
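/*
 * Illustrative sketch (not from the patch itself): _vm_map_clip_start()
 * above splits one entry in two at "start".  The clone keeps the front
 * part [vme_start, start) and the original keeps the back part
 * [start, vme_end), with its object offset advanced by the amount that was
 * clipped off.  The same arithmetic on a plain descriptor:
 */
#include <assert.h>
#include <stdint.h>

struct range_entry {
	uint64_t start, end;	/* address range [start, end) */
	uint64_t offset;	/* offset into the backing object at "start" */
};

static void
clip_front(struct range_entry *e, uint64_t split, struct range_entry *front)
{
	assert(e->start < split && split < e->end);

	*front = *e;			/* clone everything */
	front->end = split;		/* clone keeps the front slice */

	e->offset += split - e->start;	/* back slice starts deeper in the object */
	e->start = split;
}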
*/ -static void +void vm_map_clip_end( - vm_map_t map, - vm_map_entry_t entry, - vm_map_offset_t endaddr) + vm_map_t map, + vm_map_entry_t entry, + vm_map_offset_t endaddr) { if (endaddr > entry->vme_end) { /* @@ -3071,8 +5428,8 @@ vm_map_clip_end( endaddr = entry->vme_end; } #ifndef NO_NESTED_PMAP - if (entry->use_pmap) { - vm_map_offset_t start_unnest, end_unnest; + if (entry->is_sub_map && entry->use_pmap) { + vm_map_offset_t start_unnest, end_unnest; /* * Make sure the range between the start of this entry and @@ -3083,21 +5440,36 @@ vm_map_clip_end( */ start_unnest = entry->vme_start; end_unnest = - (endaddr + pmap_nesting_size_min - 1) & - ~(pmap_nesting_size_min - 1); + (endaddr + pmap_nesting_size_min - 1) & + ~(pmap_nesting_size_min - 1); vm_map_clip_unnest(map, entry, start_unnest, end_unnest); } #endif /* NO_NESTED_PMAP */ if (endaddr < entry->vme_end) { - if (entry->object.vm_object && + if (VME_OBJECT(entry) && !entry->is_sub_map && - entry->object.vm_object->phys_contiguous) { + VME_OBJECT(entry)->phys_contiguous) { pmap_remove(map->pmap, - (addr64_t)(entry->vme_start), - (addr64_t)(entry->vme_end)); + (addr64_t)(entry->vme_start), + (addr64_t)(entry->vme_end)); + } + if (entry->vme_atomic) { + panic("Attempting to clip an atomic VM entry! (map: %p, entry: %p)\n", map, entry); } + DTRACE_VM5( + vm_map_clip_end, + vm_map_t, map, + vm_map_offset_t, entry->vme_start, + vm_map_offset_t, entry->vme_end, + vm_map_offset_t, endaddr, + int, VME_ALIAS(entry)); + _vm_map_clip_end(&map->hdr, entry, endaddr); - vm_map_store_update_first_free(map, map->first_free); + if (map->holelistenabled) { + vm_map_store_update_first_free(map, NULL, FALSE); + } else { + vm_map_store_update_first_free(map, map->first_free, FALSE); + } } } @@ -3105,7 +5477,7 @@ vm_map_clip_end( #define vm_map_copy_clip_end(copy, entry, endaddr) \ MACRO_BEGIN \ if ((endaddr) < (entry)->vme_end) \ - _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \ + _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \ MACRO_END /* @@ -3114,29 +5486,38 @@ vm_map_clip_end( */ static void _vm_map_clip_end( - register struct vm_map_header *map_header, - register vm_map_entry_t entry, - register vm_map_offset_t end) + struct vm_map_header *map_header, + vm_map_entry_t entry, + vm_map_offset_t end) { - register vm_map_entry_t new_entry; + vm_map_entry_t new_entry; /* * Create a new entry and insert it * AFTER the specified entry */ - new_entry = _vm_map_entry_create(map_header); + if (entry->map_aligned) { + assert(VM_MAP_PAGE_ALIGNED(end, + VM_MAP_HDR_PAGE_MASK(map_header))); + } + + new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable); vm_map_entry_copy_full(new_entry, entry); + assert(entry->vme_start < end); new_entry->vme_start = entry->vme_end = end; - new_entry->offset += (end - entry->vme_start); + VME_OFFSET_SET(new_entry, + VME_OFFSET(new_entry) + (end - entry->vme_start)); + assert(new_entry->vme_start < new_entry->vme_end); _vm_map_store_entry_link(map_header, entry, new_entry); - if (entry->is_sub_map) - vm_map_reference(new_entry->object.sub_map); - else - vm_object_reference(new_entry->object.vm_object); + if (entry->is_sub_map) { + vm_map_reference(VME_SUBMAP(new_entry)); + } else { + vm_object_reference(VME_OBJECT(new_entry)); + } } @@ -3146,19 +5527,19 @@ _vm_map_clip_end( * Asserts that the starting and ending region * addresses fall within the valid range of the map. 
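/*
 * Illustrative sketch (not from the patch itself): VM_MAP_RANGE_CHECK(),
 * defined just below, does not reject an out-of-range request, it clamps
 * it.  Both endpoints are pulled into [vm_map_min, vm_map_max] and an
 * inverted range collapses to an empty one.  As a plain function:
 */
#include <stdint.h>

static void
clamp_range(uint64_t map_min, uint64_t map_max, uint64_t *start, uint64_t *end)
{
	if (*start < map_min) {
		*start = map_min;
	}
	if (*end > map_max) {
		*end = map_max;
	}
	if (*start > *end) {
		*start = *end;		/* empty range */
	}
}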
*/ -#define VM_MAP_RANGE_CHECK(map, start, end) \ - MACRO_BEGIN \ - if (start < vm_map_min(map)) \ - start = vm_map_min(map); \ - if (end > vm_map_max(map)) \ - end = vm_map_max(map); \ - if (start > end) \ - start = end; \ +#define VM_MAP_RANGE_CHECK(map, start, end) \ + MACRO_BEGIN \ + if (start < vm_map_min(map)) \ + start = vm_map_min(map); \ + if (end > vm_map_max(map)) \ + end = vm_map_max(map); \ + if (start > end) \ + start = end; \ MACRO_END /* * vm_map_range_check: [ internal use only ] - * + * * Check that the region defined by the specified start and * end addresses are wholly contained within a single map * entry or set of adjacent map entries of the spacified map, @@ -3172,49 +5553,54 @@ _vm_map_clip_end( */ static boolean_t vm_map_range_check( - register vm_map_t map, - register vm_map_offset_t start, - register vm_map_offset_t end, - vm_map_entry_t *entry) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_map_entry_t *entry) { - vm_map_entry_t cur; - register vm_map_offset_t prev; + vm_map_entry_t cur; + vm_map_offset_t prev; /* - * Basic sanity checks first + * Basic sanity checks first */ - if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) - return (FALSE); + if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) { + return FALSE; + } /* - * Check first if the region starts within a valid + * Check first if the region starts within a valid * mapping for the map. */ - if (!vm_map_lookup_entry(map, start, &cur)) - return (FALSE); + if (!vm_map_lookup_entry(map, start, &cur)) { + return FALSE; + } /* - * Optimize for the case that the region is contained + * Optimize for the case that the region is contained * in a single map entry. */ - if (entry != (vm_map_entry_t *) NULL) + if (entry != (vm_map_entry_t *) NULL) { *entry = cur; - if (end <= cur->vme_end) - return (TRUE); + } + if (end <= cur->vme_end) { + return TRUE; + } /* - * If the region is not wholly contained within a - * single entry, walk the entries looking for holes. + * If the region is not wholly contained within a + * single entry, walk the entries looking for holes. */ prev = cur->vme_end; cur = cur->vme_next; while ((cur != vm_map_to_entry(map)) && (prev == cur->vme_start)) { - if (end <= cur->vme_end) - return (TRUE); + if (end <= cur->vme_end) { + return TRUE; + } prev = cur->vme_end; cur = cur->vme_next; } - return (FALSE); + return FALSE; } /* @@ -3237,23 +5623,23 @@ vm_map_range_check( */ kern_return_t vm_map_submap( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_map_t submap, - vm_map_offset_t offset, + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_map_t submap, + vm_map_offset_t offset, #ifdef NO_NESTED_PMAP __unused -#endif /* NO_NESTED_PMAP */ - boolean_t use_pmap) +#endif /* NO_NESTED_PMAP */ + boolean_t use_pmap) { - vm_map_entry_t entry; - register kern_return_t result = KERN_INVALID_ARGUMENT; - register vm_object_t object; + vm_map_entry_t entry; + kern_return_t result = KERN_INVALID_ARGUMENT; + vm_object_t object; vm_map_lock(map); - if (! 
vm_map_lookup_entry(map, start, &entry)) { + if (!vm_map_lookup_entry(map, start, &entry)) { entry = entry->vme_next; } @@ -3263,52 +5649,70 @@ vm_map_submap( return KERN_INVALID_ARGUMENT; } - assert(!entry->use_pmap); /* we don't want to unnest anything here */ vm_map_clip_start(map, entry, start); vm_map_clip_end(map, entry, end); if ((entry->vme_start == start) && (entry->vme_end == end) && (!entry->is_sub_map) && - ((object = entry->object.vm_object) == vm_submap_object) && + ((object = VME_OBJECT(entry)) == vm_submap_object) && (object->resident_page_count == 0) && (object->copy == VM_OBJECT_NULL) && (object->shadow == VM_OBJECT_NULL) && (!object->pager_created)) { - entry->offset = (vm_object_offset_t)offset; - entry->object.vm_object = VM_OBJECT_NULL; + VME_OFFSET_SET(entry, (vm_object_offset_t)offset); + VME_OBJECT_SET(entry, VM_OBJECT_NULL); vm_object_deallocate(object); entry->is_sub_map = TRUE; - entry->object.sub_map = submap; + entry->use_pmap = FALSE; + VME_SUBMAP_SET(entry, submap); vm_map_reference(submap); - submap->mapped = TRUE; + if (submap->mapped_in_other_pmaps == FALSE && + vm_map_pmap(submap) != PMAP_NULL && + vm_map_pmap(submap) != vm_map_pmap(map)) { + /* + * This submap is being mapped in a map + * that uses a different pmap. + * Set its "mapped_in_other_pmaps" flag + * to indicate that we now need to + * remove mappings from all pmaps rather + * than just the submap's pmap. + */ + submap->mapped_in_other_pmaps = TRUE; + } #ifndef NO_NESTED_PMAP if (use_pmap) { /* nest if platform code will allow */ - if(submap->pmap == NULL) { - submap->pmap = pmap_create((vm_map_size_t) 0, FALSE); - if(submap->pmap == PMAP_NULL) { + if (submap->pmap == NULL) { + ledger_t ledger = map->pmap->ledger; + submap->pmap = pmap_create_options(ledger, + (vm_map_size_t) 0, 0); + if (submap->pmap == PMAP_NULL) { vm_map_unlock(map); - return(KERN_NO_SPACE); + return KERN_NO_SPACE; } +#if defined(__arm__) || defined(__arm64__) + pmap_set_nested(submap->pmap); +#endif } result = pmap_nest(map->pmap, - (entry->object.sub_map)->pmap, - (addr64_t)start, - (addr64_t)start, - (uint64_t)(end - start)); - if(result) + (VME_SUBMAP(entry))->pmap, + (addr64_t)start, + (addr64_t)start, + (uint64_t)(end - start)); + if (result) { panic("vm_map_submap: pmap_nest failed, rc = %08X\n", result); + } entry->use_pmap = TRUE; } -#else /* NO_NESTED_PMAP */ +#else /* NO_NESTED_PMAP */ pmap_remove(map->pmap, (addr64_t)start, (addr64_t)end); -#endif /* NO_NESTED_PMAP */ +#endif /* NO_NESTED_PMAP */ result = KERN_SUCCESS; } vm_map_unlock(map); - return(result); + return result; } /* @@ -3321,20 +5725,81 @@ vm_map_submap( */ kern_return_t vm_map_protect( - register vm_map_t map, - register vm_map_offset_t start, - register vm_map_offset_t end, - register vm_prot_t new_prot, - register boolean_t set_max) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t new_prot, + boolean_t set_max) { - register vm_map_entry_t current; - register vm_map_offset_t prev; - vm_map_entry_t entry; - vm_prot_t new_max; + vm_map_entry_t current; + vm_map_offset_t prev; + vm_map_entry_t entry; + vm_prot_t new_max; + int pmap_options = 0; + kern_return_t kr; + + if (new_prot & VM_PROT_COPY) { + vm_map_offset_t new_start; + vm_prot_t cur_prot, max_prot; + vm_map_kernel_flags_t kflags; + + /* LP64todo - see below */ + if (start >= map->max_offset) { + return KERN_INVALID_ADDRESS; + } + +#if VM_PROTECT_WX_FAIL + if ((new_prot & VM_PROT_EXECUTE) && + map != kernel_map && + cs_process_enforcement(NULL)) { + 
DTRACE_VM3(cs_wx, + uint64_t, (uint64_t) start, + uint64_t, (uint64_t) end, + vm_prot_t, new_prot); + printf("CODE SIGNING: %d[%s] %s can't have both write and exec at the same time\n", + proc_selfpid(), + (current_task()->bsd_info + ? proc_name_address(current_task()->bsd_info) + : "?"), + __FUNCTION__); + return KERN_PROTECTION_FAILURE; + } +#endif /* VM_PROTECT_WX_FAIL */ - XPR(XPR_VM_MAP, - "vm_map_protect, 0x%X start 0x%X end 0x%X, new 0x%X %d", - map, start, end, new_prot, set_max); + /* + * Let vm_map_remap_extract() know that it will need to: + * + make a copy of the mapping + * + add VM_PROT_WRITE to the max protections + * + remove any protections that are no longer allowed from the + * max protections (to avoid any WRITE/EXECUTE conflict, for + * example). + * Note that "max_prot" is an IN/OUT parameter only for this + * specific (VM_PROT_COPY) case. It's usually an OUT parameter + * only. + */ + max_prot = new_prot & VM_PROT_ALL; + kflags = VM_MAP_KERNEL_FLAGS_NONE; + kflags.vmkf_remap_prot_copy = TRUE; + kflags.vmkf_overwrite_immutable = TRUE; + new_start = start; + kr = vm_map_remap(map, + &new_start, + end - start, + 0, /* mask */ + VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, + kflags, + 0, + map, + start, + TRUE, /* copy-on-write remapping! */ + &cur_prot, + &max_prot, + VM_INHERIT_DEFAULT); + if (kr != KERN_SUCCESS) { + return kr; + } + new_prot &= ~VM_PROT_COPY; + } vm_map_lock(map); @@ -3344,27 +5809,28 @@ vm_map_protect( */ if (start >= map->max_offset) { vm_map_unlock(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } - while(1) { + while (1) { /* - * Lookup the entry. If it doesn't start in a valid + * Lookup the entry. If it doesn't start in a valid * entry, return an error. */ - if (! vm_map_lookup_entry(map, start, &entry)) { + if (!vm_map_lookup_entry(map, start, &entry)) { vm_map_unlock(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } - if (entry->superpage_size && (start & (SUPERPAGE_SIZE-1))) { /* extend request to whole entry */ + if (entry->superpage_size && (start & (SUPERPAGE_SIZE - 1))) { /* extend request to whole entry */ start = SUPERPAGE_ROUND_DOWN(start); continue; } break; - } - if (entry->superpage_size) - end = SUPERPAGE_ROUND_UP(end); + } + if (entry->superpage_size) { + end = SUPERPAGE_ROUND_UP(end); + } /* * Make a first pass to check for protection and address @@ -3374,46 +5840,89 @@ vm_map_protect( current = entry; prev = current->vme_start; while ((current != vm_map_to_entry(map)) && - (current->vme_start < end)) { - + (current->vme_start < end)) { /* * If there is a hole, return an error. 
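/*
 * Illustrative sketch (not from the patch itself): vm_map_protect() filters
 * write+execute requests in two places around this point, the VM_PROT_COPY
 * fast path above and the per-entry pass below.  For a non-JIT entry the
 * request is either downgraded (EXECUTE stripped) or, with the stricter
 * VM_PROTECT_WX_FAIL build option, refused outright.  The decision, with
 * the code-signing and kernel-map exceptions elided:
 */
#include <stdbool.h>

#define SKETCH_PROT_WRITE	0x2	/* same values as VM_PROT_WRITE/EXECUTE */
#define SKETCH_PROT_EXECUTE	0x4

static int	/* returns -1 to refuse, else the (possibly downgraded) protection */
wx_filter(int new_prot, bool entry_used_for_jit, bool wx_fail)
{
	if ((new_prot & (SKETCH_PROT_WRITE | SKETCH_PROT_EXECUTE)) ==
	    (SKETCH_PROT_WRITE | SKETCH_PROT_EXECUTE) &&
	    !entry_used_for_jit) {
		if (wx_fail) {
			return -1;			/* KERN_PROTECTION_FAILURE */
		}
		new_prot &= ~SKETCH_PROT_EXECUTE;	/* keep write, drop execute */
	}
	return new_prot;
}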
*/ if (current->vme_start != prev) { vm_map_unlock(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } new_max = current->max_protection; - if(new_prot & VM_PROT_COPY) { - new_max |= VM_PROT_WRITE; - if ((new_prot & (new_max | VM_PROT_COPY)) != new_prot) { - vm_map_unlock(map); - return(KERN_PROTECTION_FAILURE); - } - } else { - if ((new_prot & new_max) != new_prot) { - vm_map_unlock(map); - return(KERN_PROTECTION_FAILURE); - } + if ((new_prot & new_max) != new_prot) { + vm_map_unlock(map); + return KERN_PROTECTION_FAILURE; } -#if CONFIG_EMBEDDED - if (new_prot & VM_PROT_WRITE) { - if ((new_prot & VM_PROT_EXECUTE) && !(current->used_for_jit)) { - printf("EMBEDDED: %s can't have both write and exec at the same time\n", __FUNCTION__); - new_prot &= ~VM_PROT_EXECUTE; + if ((new_prot & VM_PROT_WRITE) && + (new_prot & VM_PROT_EXECUTE) && +#if !CONFIG_EMBEDDED + map != kernel_map && + cs_process_enforcement(NULL) && +#endif /* !CONFIG_EMBEDDED */ + !(current->used_for_jit)) { + DTRACE_VM3(cs_wx, + uint64_t, (uint64_t) current->vme_start, + uint64_t, (uint64_t) current->vme_end, + vm_prot_t, new_prot); + printf("CODE SIGNING: %d[%s] %s can't have both write and exec at the same time\n", + proc_selfpid(), + (current_task()->bsd_info + ? proc_name_address(current_task()->bsd_info) + : "?"), + __FUNCTION__); + new_prot &= ~VM_PROT_EXECUTE; +#if VM_PROTECT_WX_FAIL + vm_map_unlock(map); + return KERN_PROTECTION_FAILURE; +#endif /* VM_PROTECT_WX_FAIL */ + } + + /* + * If the task has requested executable lockdown, + * deny both: + * - adding executable protections OR + * - adding write protections to an existing executable mapping. + */ + if (map->map_disallow_new_exec == TRUE) { + if ((new_prot & VM_PROT_EXECUTE) || + ((current->protection & VM_PROT_EXECUTE) && (new_prot & VM_PROT_WRITE))) { + vm_map_unlock(map); + return KERN_PROTECTION_FAILURE; } } -#endif prev = current->vme_end; current = current->vme_next; } - if (end > prev) { - vm_map_unlock(map); - return(KERN_INVALID_ADDRESS); - } + +#if __arm64__ + if (end > prev && + end == vm_map_round_page(prev, VM_MAP_PAGE_MASK(map))) { + vm_map_entry_t prev_entry; + + prev_entry = current->vme_prev; + if (prev_entry != vm_map_to_entry(map) && + !prev_entry->map_aligned && + (vm_map_round_page(prev_entry->vme_end, + VM_MAP_PAGE_MASK(map)) + == end)) { + /* + * The last entry in our range is not "map-aligned" + * but it would have reached all the way to "end" + * if it had been map-aligned, so this is not really + * a hole in the range and we can proceed. + */ + prev = end; + } + } +#endif /* __arm64__ */ + + if (end > prev) { + vm_map_unlock(map); + return KERN_INVALID_ADDRESS; + } /* * Go back and fix up protections. @@ -3428,46 +5937,32 @@ vm_map_protect( } while ((current != vm_map_to_entry(map)) && - (current->vme_start < end)) { - - vm_prot_t old_prot; + (current->vme_start < end)) { + vm_prot_t old_prot; vm_map_clip_end(map, current, end); - assert(!current->use_pmap); /* clipping did unnest if needed */ + if (current->is_sub_map) { + /* clipping did unnest if needed */ + assert(!current->use_pmap); + } old_prot = current->protection; - if(new_prot & VM_PROT_COPY) { - /* caller is asking specifically to copy the */ - /* mapped data, this implies that max protection */ - /* will include write. 
Caller must be prepared */ - /* for loss of shared memory communication in the */ - /* target area after taking this step */ - - if (current->is_sub_map == FALSE && current->object.vm_object == VM_OBJECT_NULL){ - current->object.vm_object = vm_object_allocate((vm_map_size_t)(current->vme_end - current->vme_start)); - current->offset = 0; - } - current->needs_copy = TRUE; - current->max_protection |= VM_PROT_WRITE; + if (set_max) { + current->max_protection = new_prot; + current->protection = new_prot & old_prot; + } else { + current->protection = new_prot; } - if (set_max) - current->protection = - (current->max_protection = - new_prot & ~VM_PROT_COPY) & - old_prot; - else - current->protection = new_prot & ~VM_PROT_COPY; - /* * Update physical map if necessary. - * If the request is to turn off write protection, - * we won't do it for real (in pmap). This is because - * it would cause copy-on-write to fail. We've already - * set, the new protection in the map, so if a - * write-protect fault occurred, it will be fixed up + * If the request is to turn off write protection, + * we won't do it for real (in pmap). This is because + * it would cause copy-on-write to fail. We've already + * set, the new protection in the map, so if a + * write-protect fault occurred, it will be fixed up * properly, COW or not. */ if (current->protection != old_prot) { @@ -3477,21 +5972,75 @@ vm_map_protect( vm_prot_t prot; - prot = current->protection & ~VM_PROT_WRITE; + prot = current->protection; + if (current->is_sub_map || (VME_OBJECT(current) == NULL) || (VME_OBJECT(current) != compressor_object)) { + prot &= ~VM_PROT_WRITE; + } else { + assert(!VME_OBJECT(current)->code_signed); + assert(VME_OBJECT(current)->copy_strategy == MEMORY_OBJECT_COPY_NONE); + } + + if (override_nx(map, VME_ALIAS(current)) && prot) { + prot |= VM_PROT_EXECUTE; + } + +#if CONFIG_EMBEDDED && (DEVELOPMENT || DEBUG) + if (!(old_prot & VM_PROT_EXECUTE) && + (prot & VM_PROT_EXECUTE) && + panic_on_unsigned_execute && + (proc_selfcsflags() & CS_KILL)) { + panic("vm_map_protect(%p,0x%llx,0x%llx) old=0x%x new=0x%x - code-signing bypass?\n", map, (uint64_t)current->vme_start, (uint64_t)current->vme_end, old_prot, prot); + } +#endif /* CONFIG_EMBEDDED && (DEVELOPMENT || DEBUG) */ + + if (pmap_has_prot_policy(prot)) { + if (current->wired_count) { + panic("vm_map_protect(%p,0x%llx,0x%llx) new=0x%x wired=%x\n", + map, (uint64_t)current->vme_start, (uint64_t)current->vme_end, prot, current->wired_count); + } - if (override_nx(map, current->alias) && prot) - prot |= VM_PROT_EXECUTE; + /* If the pmap layer cares about this + * protection type, force a fault for + * each page so that vm_fault will + * repopulate the page with the full + * set of protections. + */ + /* + * TODO: We don't seem to need this, + * but this is due to an internal + * implementation detail of + * pmap_protect. Do we want to rely + * on this? + */ + prot = VM_PROT_NONE; + } if (current->is_sub_map && current->use_pmap) { - pmap_protect(current->object.sub_map->pmap, - current->vme_start, - current->vme_end, - prot); + pmap_protect(VME_SUBMAP(current)->pmap, + current->vme_start, + current->vme_end, + prot); } else { - pmap_protect(map->pmap, - current->vme_start, - current->vme_end, - prot); + if (prot & VM_PROT_WRITE) { + if (VME_OBJECT(current) == compressor_object) { + /* + * For write requests on the + * compressor, we wil ask the + * pmap layer to prevent us from + * taking a write fault when we + * attempt to access the mapping + * next. 
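
The second pass pushes the new protection into the pmap, but not literally: for ordinary objects write permission is left in place at the pmap level so a later write fault can still be resolved by copy-on-write, and execute may be re-added when the map overrides NX. The sketch below models only that protection computation; "is_compressor" and "override_nx" are simplified stand-ins for the compressor_object and override_nx() checks in the hunk above, and the pmap_has_prot_policy special case is omitted.

/*
 * Toy model of the protection value handed to the pmap layer by
 * vm_map_protect()'s second pass.
 */
#include <stdio.h>
#include <stdbool.h>

#define P_READ  0x1
#define P_WRITE 0x2
#define P_EXEC  0x4

static int
pmap_prot_for_entry(int entry_prot, bool is_compressor, bool override_nx)
{
	int prot = entry_prot;

	if (!is_compressor) {
		/* don't write-protect for real; COW will fix it up on fault */
		prot &= ~P_WRITE;
	}
	if (override_nx && prot != 0) {
		prot |= P_EXEC;
	}
	return prot;
}

int
main(void)
{
	printf("rw-, normal object -> 0x%x\n", pmap_prot_for_entry(P_READ | P_WRITE, false, false)); /* 0x1 */
	printf("rw-, compressor    -> 0x%x\n", pmap_prot_for_entry(P_READ | P_WRITE, true, false));  /* 0x3 */
	printf("r--, nx override   -> 0x%x\n", pmap_prot_for_entry(P_READ, false, true));            /* 0x5 */
	return 0;
}
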
+ */ + pmap_options |= PMAP_OPTIONS_PROTECT_IMMEDIATE; + } + } + + pmap_protect_options(map->pmap, + current->vme_start, + current->vme_end, + prot, + pmap_options, + NULL); } } current = current->vme_next; @@ -3499,13 +6048,13 @@ vm_map_protect( current = entry; while ((current != vm_map_to_entry(map)) && - (current->vme_start <= end)) { + (current->vme_start <= end)) { vm_map_simplify_entry(map, current); current = current->vme_next; } vm_map_unlock(map); - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -3518,13 +6067,13 @@ vm_map_protect( */ kern_return_t vm_map_inherit( - register vm_map_t map, - register vm_map_offset_t start, - register vm_map_offset_t end, - register vm_inherit_t new_inheritance) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_inherit_t new_inheritance) { - register vm_map_entry_t entry; - vm_map_entry_t temp_entry; + vm_map_entry_t entry; + vm_map_entry_t temp_entry; vm_map_lock(map); @@ -3532,8 +6081,7 @@ vm_map_inherit( if (vm_map_lookup_entry(map, start, &temp_entry)) { entry = temp_entry; - } - else { + } else { temp_entry = temp_entry->vme_next; entry = temp_entry; } @@ -3541,10 +6089,10 @@ vm_map_inherit( /* first check entire range for submaps which can't support the */ /* given inheritance. */ while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { - if(entry->is_sub_map) { - if(new_inheritance == VM_INHERIT_COPY) { + if (entry->is_sub_map) { + if (new_inheritance == VM_INHERIT_COPY) { vm_map_unlock(map); - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } } @@ -3559,7 +6107,10 @@ vm_map_inherit( while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { vm_map_clip_end(map, entry, end); - assert(!entry->use_pmap); /* clip did unnest if needed */ + if (entry->is_sub_map) { + /* clip did unnest if needed */ + assert(!entry->use_pmap); + } entry->inheritance = new_inheritance; @@ -3567,7 +6118,7 @@ vm_map_inherit( } vm_map_unlock(map); - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -3577,11 +6128,11 @@ vm_map_inherit( static kern_return_t add_wire_counts( - vm_map_t map, - vm_map_entry_t entry, - boolean_t user_wire) -{ - vm_map_size_t size; + vm_map_t map, + vm_map_entry_t entry, + boolean_t user_wire) +{ + vm_map_size_t size; if (user_wire) { unsigned int total_wire_count = vm_page_wire_count + vm_lopage_free_count; @@ -3593,45 +6144,46 @@ add_wire_counts( if (entry->user_wired_count == 0) { size = entry->vme_end - entry->vme_start; - + /* * Since this is the first time the user is wiring this map entry, check to see if we're * exceeding the user wire limits. There is a per map limit which is the smaller of either - * the process's rlimit or the global vm_user_wire_limit which caps this value. There is also + * the process's rlimit or the global vm_per_task_user_wire_limit which caps this value. There is also * a system-wide limit on the amount of memory all users can wire. If the user is over either * limit, then we fail. */ - if(size + map->user_wire_size > MIN(map->user_wire_limit, vm_user_wire_limit) || - size + ptoa_64(total_wire_count) > vm_global_user_wire_limit || - size + ptoa_64(total_wire_count) > max_mem - vm_global_no_user_wire_amount) + if (size + map->user_wire_size > MIN(map->user_wire_limit, vm_per_task_user_wire_limit) || + size + ptoa_64(total_wire_count) > vm_global_user_wire_limit) { return KERN_RESOURCE_SHORTAGE; + } /* * The first time the user wires an entry, we also increment the wired_count and add this to * the total that has been wired in the map. 
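
add_wire_counts() admits a first-time user wiring only if two ceilings hold: a per-map ceiling (the smaller of the task's rlimit-derived limit and the per-task limit) and a system-wide ceiling on all wired memory. A compilable sketch of that admission check follows; the field names mirror the hunk above but the structure, units (plain bytes rather than pages) and limit values are invented for illustration.

/*
 * Toy model of the user-wire admission check in add_wire_counts().
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct wire_limits {
	uint64_t map_user_wire_size;      /* already wired in this map */
	uint64_t map_user_wire_limit;     /* per-map (rlimit) ceiling */
	uint64_t per_task_wire_limit;     /* per-task ceiling */
	uint64_t total_wired;             /* wired system-wide */
	uint64_t global_wire_limit;       /* system-wide ceiling */
};

static bool
user_wire_allowed(const struct wire_limits *l, uint64_t size)
{
	if (size + l->map_user_wire_size >
	    MIN(l->map_user_wire_limit, l->per_task_wire_limit)) {
		return false;   /* would exceed the per-map ceiling */
	}
	if (size + l->total_wired > l->global_wire_limit) {
		return false;   /* would exceed the global ceiling */
	}
	return true;            /* otherwise KERN_RESOURCE_SHORTAGE is avoided */
}

int
main(void)
{
	struct wire_limits l = {
		.map_user_wire_size  = 1 << 20,
		.map_user_wire_limit = 4 << 20,
		.per_task_wire_limit = 8 << 20,
		.total_wired         = 64 << 20,
		.global_wire_limit   = 128 << 20,
	};
	printf("wire 1MB: %s\n", user_wire_allowed(&l, 1 << 20) ? "ok" : "shortage");
	printf("wire 8MB: %s\n", user_wire_allowed(&l, 8 << 20) ? "ok" : "shortage");
	return 0;
}
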
*/ - if (entry->wired_count >= MAX_WIRE_COUNT) + if (entry->wired_count >= MAX_WIRE_COUNT) { return KERN_FAILURE; + } entry->wired_count++; map->user_wire_size += size; } - if (entry->user_wired_count >= MAX_WIRE_COUNT) + if (entry->user_wired_count >= MAX_WIRE_COUNT) { return KERN_FAILURE; + } entry->user_wired_count++; - } else { - /* * The kernel's wiring the memory. Just bump the count and continue. */ - if (entry->wired_count >= MAX_WIRE_COUNT) + if (entry->wired_count >= MAX_WIRE_COUNT) { panic("vm_map_wire: too many wirings"); + } entry->wired_count++; } @@ -3645,19 +6197,16 @@ add_wire_counts( static void subtract_wire_counts( - vm_map_t map, - vm_map_entry_t entry, - boolean_t user_wire) -{ - + vm_map_t map, + vm_map_entry_t entry, + boolean_t user_wire) +{ if (user_wire) { - /* * We're unwiring memory at the request of the user. See if we're removing the last user wire reference. */ if (entry->user_wired_count == 1) { - /* * We're removing the last user wire reference. Decrement the wired_count and the total * user wired memory for this map. @@ -3670,9 +6219,7 @@ subtract_wire_counts( assert(entry->user_wired_count >= 1); entry->user_wired_count--; - } else { - /* * The kernel is unwiring the memory. Just update the count. */ @@ -3682,6 +6229,8 @@ subtract_wire_counts( } } +int cs_executable_wire = 0; + /* * vm_map_wire: * @@ -3697,34 +6246,57 @@ subtract_wire_counts( */ static kern_return_t vm_map_wire_nested( - register vm_map_t map, - register vm_map_offset_t start, - register vm_map_offset_t end, - register vm_prot_t access_type, - boolean_t user_wire, - pmap_t map_pmap, - vm_map_offset_t pmap_addr) -{ - register vm_map_entry_t entry; - struct vm_map_entry *first_entry, tmp_entry; - vm_map_t real_map; - register vm_map_offset_t s,e; - kern_return_t rc; - boolean_t need_wakeup; - boolean_t main_map = FALSE; - wait_interrupt_t interruptible_state; - thread_t cur_thread; - unsigned int last_timestamp; - vm_map_size_t size; + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t caller_prot, + vm_tag_t tag, + boolean_t user_wire, + pmap_t map_pmap, + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p) +{ + vm_map_entry_t entry; + vm_prot_t access_type; + struct vm_map_entry *first_entry, tmp_entry; + vm_map_t real_map; + vm_map_offset_t s, e; + kern_return_t rc; + boolean_t need_wakeup; + boolean_t main_map = FALSE; + wait_interrupt_t interruptible_state; + thread_t cur_thread; + unsigned int last_timestamp; + vm_map_size_t size; + boolean_t wire_and_extract; + + access_type = (caller_prot & VM_PROT_ALL); + + wire_and_extract = FALSE; + if (physpage_p != NULL) { + /* + * The caller wants the physical page number of the + * wired page. We return only one physical page number + * so this works for only one page at a time. 
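
Because only a single physical page number can be returned through physpage_p, a "wire and extract" request must cover exactly one page, which the check just below enforces before *physpage_p is cleared. A small sketch of that argument validation (PAGE_SIZE_X and the ppnum type are stand-ins, not the kernel's constants):

/*
 * Toy model of the wire_and_extract argument check.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_X 4096ULL

typedef uint32_t ppnum_x_t;

static int
validate_wire_and_extract(uint64_t start, uint64_t end, ppnum_x_t *physpage_p)
{
	if (physpage_p != NULL) {
		if ((end - start) != PAGE_SIZE_X) {
			return -1;      /* more than one page: invalid argument */
		}
		*physpage_p = 0;        /* cleared until a wired page is found */
	}
	return 0;
}

int
main(void)
{
	ppnum_x_t pp;
	printf("one page:  %d\n", validate_wire_and_extract(0x1000, 0x2000, &pp)); /* 0 */
	printf("two pages: %d\n", validate_wire_and_extract(0x1000, 0x3000, &pp)); /* -1 */
	return 0;
}
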
+ */ + if ((end - start) != PAGE_SIZE) { + return KERN_INVALID_ARGUMENT; + } + wire_and_extract = TRUE; + *physpage_p = 0; + } vm_map_lock(map); - if(map_pmap == NULL) + if (map_pmap == NULL) { main_map = TRUE; + } last_timestamp = map->timestamp; VM_MAP_RANGE_CHECK(map, start, end); assert(page_aligned(start)); assert(page_aligned(end)); + assert(VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map))); + assert(VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map))); if (start == end) { /* We wired what the caller asked for, zero pages */ vm_map_unlock(map); @@ -3760,8 +6332,9 @@ vm_map_wire_nested( /* "e" is how far we want to wire in this entry */ e = entry->vme_end; - if (e > end) + if (e > end) { e = end; + } /* * If another thread is wiring/unwiring this entry then @@ -3792,10 +6365,10 @@ vm_map_wire_nested( /* * User wiring is interruptible */ - wait_result = vm_map_entry_wait(map, - (user_wire) ? THREAD_ABORTSAFE : - THREAD_UNINT); - if (user_wire && wait_result == THREAD_INTERRUPTED) { + wait_result = vm_map_entry_wait(map, + (user_wire) ? THREAD_ABORTSAFE : + THREAD_UNINT); + if (user_wire && wait_result == THREAD_INTERRUPTED) { /* * undo the wirings we have done so far * We do not clear the needs_wakeup flag, @@ -3827,33 +6400,44 @@ vm_map_wire_nested( entry = first_entry; continue; } - + if (entry->is_sub_map) { - vm_map_offset_t sub_start; - vm_map_offset_t sub_end; - vm_map_offset_t local_start; - vm_map_offset_t local_end; - pmap_t pmap; + vm_map_offset_t sub_start; + vm_map_offset_t sub_end; + vm_map_offset_t local_start; + vm_map_offset_t local_end; + pmap_t pmap; + + if (wire_and_extract) { + /* + * Wiring would result in copy-on-write + * which would not be compatible with + * the sharing we have with the original + * provider of this memory. 
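
The in_transition handling above is a small handshake: a thread that finds an entry being wired or unwired by someone else marks the entry needs_wakeup, blocks (interruptibly for user wirings), and after waking must restart its lookup because the entry may have been clipped or coalesced. The sketch below models just the two sides of that handshake with toy flags; no real locking or sleeping is involved.

/*
 * Toy model of the in_transition / needs_wakeup handshake.
 */
#include <stdio.h>
#include <stdbool.h>

struct toy_entry {
	bool in_transition;     /* another thread is wiring/unwiring it */
	bool needs_wakeup;      /* someone is blocked waiting on it */
};

/* waiter side: returns true if the caller must block and then re-lookup */
static bool
wait_if_in_transition(struct toy_entry *e)
{
	if (!e->in_transition) {
		return false;
	}
	e->needs_wakeup = true;
	/* real code: vm_map_entry_wait() drops the map lock and sleeps */
	return true;
}

/* owner side: finish the transition and note that waiters need a wakeup */
static void
finish_transition(struct toy_entry *e, bool *need_wakeup)
{
	e->in_transition = false;
	if (e->needs_wakeup) {
		e->needs_wakeup = false;
		*need_wakeup = true;    /* real code: vm_map_entry_wakeup() */
	}
}

int
main(void)
{
	struct toy_entry e = { .in_transition = true, .needs_wakeup = false };
	bool need_wakeup = false;

	printf("must wait: %d\n", wait_if_in_transition(&e));   /* 1 */
	finish_transition(&e, &need_wakeup);
	printf("wakeup needed: %d, wait again: %d\n",
	    need_wakeup, wait_if_in_transition(&e));            /* 1, 0 */
	return 0;
}
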
+ */ + rc = KERN_INVALID_ARGUMENT; + goto done; + } vm_map_clip_start(map, entry, s); vm_map_clip_end(map, entry, end); - sub_start = entry->offset; + sub_start = VME_OFFSET(entry); sub_end = entry->vme_end; - sub_end += entry->offset - entry->vme_start; - + sub_end += VME_OFFSET(entry) - entry->vme_start; + local_end = entry->vme_end; - if(map_pmap == NULL) { - vm_object_t object; - vm_object_offset_t offset; - vm_prot_t prot; - boolean_t wired; - vm_map_entry_t local_entry; - vm_map_version_t version; - vm_map_t lookup_map; - - if(entry->use_pmap) { - pmap = entry->object.sub_map->pmap; + if (map_pmap == NULL) { + vm_object_t object; + vm_object_offset_t offset; + vm_prot_t prot; + boolean_t wired; + vm_map_entry_t local_entry; + vm_map_version_t version; + vm_map_t lookup_map; + + if (entry->use_pmap) { + pmap = VME_SUBMAP(entry)->pmap; /* ppc implementation requires that */ /* submaps pmap address ranges line */ /* up with parent map */ @@ -3867,8 +6451,9 @@ vm_map_wire_nested( } if (entry->wired_count) { - if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) + if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) { goto done; + } /* * The map was not unlocked: @@ -3878,7 +6463,6 @@ vm_map_wire_nested( entry = entry->vme_next; s = entry->vme_start; continue; - } /* call vm_map_lookup_locked to */ @@ -3887,30 +6471,31 @@ vm_map_wire_nested( local_start = entry->vme_start; lookup_map = map; vm_map_lock_write_to_read(map); - if(vm_map_lookup_locked( - &lookup_map, local_start, - access_type, - OBJECT_LOCK_EXCLUSIVE, - &version, &object, - &offset, &prot, &wired, - NULL, - &real_map)) { - + if (vm_map_lookup_locked( + &lookup_map, local_start, + access_type | VM_PROT_COPY, + OBJECT_LOCK_EXCLUSIVE, + &version, &object, + &offset, &prot, &wired, + NULL, + &real_map)) { vm_map_unlock_read(lookup_map); + assert(map_pmap == NULL); vm_map_unwire(map, start, - s, user_wire); - return(KERN_FAILURE); + s, user_wire); + return KERN_FAILURE; } - if(real_map != lookup_map) + vm_object_unlock(object); + if (real_map != lookup_map) { vm_map_unlock(real_map); + } vm_map_unlock_read(lookup_map); vm_map_lock(map); - vm_object_unlock(object); /* we unlocked, so must re-lookup */ - if (!vm_map_lookup_entry(map, - local_start, - &local_entry)) { + if (!vm_map_lookup_entry(map, + local_start, + &local_entry)) { rc = KERN_FAILURE; goto done; } @@ -3925,8 +6510,9 @@ vm_map_wire_nested( vm_map_clip_end(map, entry, end); /* re-compute "e" */ e = entry->vme_end; - if (e > end) + if (e > end) { e = end; + } /* did we have a change of type? */ if (!entry->is_sub_map) { @@ -3938,16 +6524,18 @@ vm_map_wire_nested( pmap = map_pmap; } - if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) + if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) { goto done; + } entry->in_transition = TRUE; vm_map_unlock(map); - rc = vm_map_wire_nested(entry->object.sub_map, - sub_start, sub_end, - access_type, - user_wire, pmap, pmap_addr); + rc = vm_map_wire_nested(VME_SUBMAP(entry), + sub_start, sub_end, + caller_prot, tag, + user_wire, pmap, pmap_addr, + NULL); vm_map_lock(map); /* @@ -3955,19 +6543,21 @@ vm_map_wire_nested( * after we unlocked the map. 
*/ if (!vm_map_lookup_entry(map, local_start, - &first_entry)) + &first_entry)) { panic("vm_map_wire: re-lookup failed"); + } entry = first_entry; assert(local_start == s); /* re-compute "e" */ e = entry->vme_end; - if (e > end) + if (e > end) { e = end; + } last_timestamp = map->timestamp; while ((entry != vm_map_to_entry(map)) && - (entry->vme_start < e)) { + (entry->vme_start < e)) { assert(entry->in_transition); entry->in_transition = FALSE; if (entry->needs_wakeup) { @@ -3979,7 +6569,7 @@ vm_map_wire_nested( } entry = entry->vme_next; } - if (rc != KERN_SUCCESS) { /* from vm_*_wire */ + if (rc != KERN_SUCCESS) { /* from vm_*_wire */ goto done; } @@ -3993,6 +6583,23 @@ vm_map_wire_nested( * the appropriate wire reference count. */ if (entry->wired_count) { + if ((entry->protection & access_type) != access_type) { + /* found a protection problem */ + + /* + * XXX FBDP + * We should always return an error + * in this case but since we didn't + * enforce it before, let's do + * it only for the new "wire_and_extract" + * code path for now... + */ + if (wire_and_extract) { + rc = KERN_PROTECTION_FAILURE; + goto done; + } + } + /* * entry is already wired down, get our reference * after clipping to our range. @@ -4000,8 +6607,59 @@ vm_map_wire_nested( vm_map_clip_start(map, entry, s); vm_map_clip_end(map, entry, end); - if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) + if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) { goto done; + } + + if (wire_and_extract) { + vm_object_t object; + vm_object_offset_t offset; + vm_page_t m; + + /* + * We don't have to "wire" the page again + * bit we still have to "extract" its + * physical page number, after some sanity + * checks. + */ + assert((entry->vme_end - entry->vme_start) + == PAGE_SIZE); + assert(!entry->needs_copy); + assert(!entry->is_sub_map); + assert(VME_OBJECT(entry)); + if (((entry->vme_end - entry->vme_start) + != PAGE_SIZE) || + entry->needs_copy || + entry->is_sub_map || + VME_OBJECT(entry) == VM_OBJECT_NULL) { + rc = KERN_INVALID_ARGUMENT; + goto done; + } + + object = VME_OBJECT(entry); + offset = VME_OFFSET(entry); + /* need exclusive lock to update m->dirty */ + if (entry->protection & VM_PROT_WRITE) { + vm_object_lock(object); + } else { + vm_object_lock_shared(object); + } + m = vm_page_lookup(object, offset); + assert(m != VM_PAGE_NULL); + assert(VM_PAGE_WIRED(m)); + if (m != VM_PAGE_NULL && VM_PAGE_WIRED(m)) { + *physpage_p = VM_PAGE_GET_PHYS_PAGE(m); + if (entry->protection & VM_PROT_WRITE) { + vm_object_lock_assert_exclusive( + object); + m->vmp_dirty = TRUE; + } + } else { + /* not already wired !? */ + *physpage_p = 0; + } + vm_object_unlock(object); + } /* map was not unlocked: no need to relookup */ entry = entry->vme_next; @@ -4013,6 +6671,38 @@ vm_map_wire_nested( * Unwired entry or wire request transmitted via submap */ + /* + * Wiring would copy the pages to the shadow object. + * The shadow object would not be code-signed so + * attempting to execute code from these copied pages + * would trigger a code-signing violation. + */ + + if ((entry->protection & VM_PROT_EXECUTE) +#if !CONFIG_EMBEDDED + && + map != kernel_map && + cs_process_enforcement(NULL) +#endif /* !CONFIG_EMBEDDED */ + ) { +#if MACH_ASSERT + printf("pid %d[%s] wiring executable range from " + "0x%llx to 0x%llx: rejected to preserve " + "code-signing\n", + proc_selfpid(), + (current_task()->bsd_info + ? 
proc_name_address(current_task()->bsd_info) + : "?"), + (uint64_t) entry->vme_start, + (uint64_t) entry->vme_end); +#endif /* MACH_ASSERT */ + DTRACE_VM2(cs_executable_wire, + uint64_t, (uint64_t)entry->vme_start, + uint64_t, (uint64_t)entry->vme_end); + cs_executable_wire++; + rc = KERN_PROTECTION_FAILURE; + goto done; + } /* * Perform actions of vm_map_lookup that need the write @@ -4027,12 +6717,29 @@ vm_map_wire_nested( * This is aggressive, but once it's wired we can't move it. */ if (entry->needs_copy) { - vm_object_shadow(&entry->object.vm_object, - &entry->offset, size); + if (wire_and_extract) { + /* + * We're supposed to share with the original + * provider so should not be "needs_copy" + */ + rc = KERN_INVALID_ARGUMENT; + goto done; + } + + VME_OBJECT_SHADOW(entry, size); entry->needs_copy = FALSE; - } else if (entry->object.vm_object == VM_OBJECT_NULL) { - entry->object.vm_object = vm_object_allocate(size); - entry->offset = (vm_object_offset_t)0; + } else if (VME_OBJECT(entry) == VM_OBJECT_NULL) { + if (wire_and_extract) { + /* + * We're supposed to share with the original + * provider so should already have an object. + */ + rc = KERN_INVALID_ARGUMENT; + goto done; + } + VME_OBJECT_SET(entry, vm_object_allocate(size)); + VME_OFFSET_SET(entry, (vm_object_offset_t)0); + assert(entry->use_pmap); } vm_map_clip_start(map, entry, s); @@ -4040,8 +6747,9 @@ vm_map_wire_nested( /* re-compute "e" */ e = entry->vme_end; - if (e > end) + if (e > end) { e = end; + } /* * Check for holes and protection mismatch. @@ -4052,7 +6760,7 @@ vm_map_wire_nested( */ if ((entry->vme_end < end) && ((entry->vme_next == vm_map_to_entry(map)) || - (entry->vme_next->vme_start > entry->vme_end))) { + (entry->vme_next->vme_start > entry->vme_end))) { /* found a hole */ rc = KERN_INVALID_ADDRESS; goto done; @@ -4065,8 +6773,9 @@ vm_map_wire_nested( assert(entry->wired_count == 0 && entry->user_wired_count == 0); - if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) + if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) { goto done; + } entry->in_transition = TRUE; @@ -4088,32 +6797,38 @@ vm_map_wire_nested( */ vm_map_unlock(map); - if (!user_wire && cur_thread != THREAD_NULL) + if (!user_wire && cur_thread != THREAD_NULL) { interruptible_state = thread_interrupt_level(THREAD_UNINT); - else + } else { interruptible_state = THREAD_UNINT; + } - if(map_pmap) - rc = vm_fault_wire(map, - &tmp_entry, map_pmap, pmap_addr); - else - rc = vm_fault_wire(map, - &tmp_entry, map->pmap, - tmp_entry.vme_start); + if (map_pmap) { + rc = vm_fault_wire(map, + &tmp_entry, caller_prot, tag, map_pmap, pmap_addr, + physpage_p); + } else { + rc = vm_fault_wire(map, + &tmp_entry, caller_prot, tag, map->pmap, + tmp_entry.vme_start, + physpage_p); + } - if (!user_wire && cur_thread != THREAD_NULL) + if (!user_wire && cur_thread != THREAD_NULL) { thread_interrupt_level(interruptible_state); + } vm_map_lock(map); - if (last_timestamp+1 != map->timestamp) { + if (last_timestamp + 1 != map->timestamp) { /* * Find the entry again. It could have been clipped * after we unlocked the map. 
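
Every time the map lock is dropped (here, around vm_fault_wire()) the code saves map->timestamp and compares it on relock: the timestamp is expected to advance by exactly one across its own unlock/relock, so any larger change means another thread modified the map and the entry has to be looked up again before the walk continues. A toy model of that check, with stand-in types and an explicit "other thread" bump:

/*
 * Toy model of the last_timestamp / re-lookup pattern.
 */
#include <stdio.h>
#include <stdbool.h>

struct toy_map {
	unsigned int timestamp;   /* advanced whenever the map is unlocked/changed */
};

static void toy_map_unlock(struct toy_map *m) { m->timestamp++; }
static void toy_map_modified_by_other_thread(struct toy_map *m) { m->timestamp++; }

static bool
must_relookup(const struct toy_map *m, unsigned int last_timestamp)
{
	/* one tick is our own unlock; anything more means interference */
	return (last_timestamp + 1) != m->timestamp;
}

int
main(void)
{
	struct toy_map m = { .timestamp = 41 };
	unsigned int last_timestamp = m.timestamp;

	toy_map_unlock(&m);
	printf("quiet map: relookup=%d\n", must_relookup(&m, last_timestamp)); /* 0 */

	toy_map_modified_by_other_thread(&m);
	printf("raced map: relookup=%d\n", must_relookup(&m, last_timestamp)); /* 1 */
	return 0;
}
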
*/ if (!vm_map_lookup_entry(map, tmp_entry.vme_start, - &first_entry)) + &first_entry)) { panic("vm_map_wire: re-lookup failed"); + } entry = first_entry; } @@ -4121,20 +6836,29 @@ vm_map_wire_nested( last_timestamp = map->timestamp; while ((entry != vm_map_to_entry(map)) && - (entry->vme_start < tmp_entry.vme_end)) { + (entry->vme_start < tmp_entry.vme_end)) { assert(entry->in_transition); entry->in_transition = FALSE; if (entry->needs_wakeup) { entry->needs_wakeup = FALSE; need_wakeup = TRUE; } - if (rc != KERN_SUCCESS) { /* from vm_*_wire */ + if (rc != KERN_SUCCESS) { /* from vm_*_wire */ subtract_wire_counts(map, entry, user_wire); } entry = entry->vme_next; } - if (rc != KERN_SUCCESS) { /* from vm_*_wire */ + if (rc != KERN_SUCCESS) { /* from vm_*_wire */ + goto done; + } + + if ((entry != vm_map_to_entry(map)) && /* we still have entries in the map */ + (tmp_entry.vme_end != end) && /* AND, we are not at the end of the requested range */ + (entry->vme_start != tmp_entry.vme_end)) { /* AND, the next entry is not contiguous. */ + /* found a "new" hole */ + s = tmp_entry.vme_end; + rc = KERN_INVALID_ADDRESS; goto done; } @@ -4152,31 +6876,103 @@ done: /* * wake up anybody waiting on entries we wired. */ - if (need_wakeup) + if (need_wakeup) { vm_map_entry_wakeup(map); + } if (rc != KERN_SUCCESS) { /* undo what has been wired so far */ - vm_map_unwire(map, start, s, user_wire); + vm_map_unwire_nested(map, start, s, user_wire, + map_pmap, pmap_addr); + if (physpage_p) { + *physpage_p = 0; + } } return rc; +} + +kern_return_t +vm_map_wire_external( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t caller_prot, + boolean_t user_wire) +{ + kern_return_t kret; + kret = vm_map_wire_nested(map, start, end, caller_prot, vm_tag_bt(), + user_wire, (pmap_t)NULL, 0, NULL); + return kret; } kern_return_t -vm_map_wire( - register vm_map_t map, - register vm_map_offset_t start, - register vm_map_offset_t end, - register vm_prot_t access_type, - boolean_t user_wire) +vm_map_wire_kernel( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t caller_prot, + vm_tag_t tag, + boolean_t user_wire) { + kern_return_t kret; + + kret = vm_map_wire_nested(map, start, end, caller_prot, tag, + user_wire, (pmap_t)NULL, 0, NULL); + return kret; +} - kern_return_t kret; +kern_return_t +vm_map_wire_and_extract_external( + vm_map_t map, + vm_map_offset_t start, + vm_prot_t caller_prot, + boolean_t user_wire, + ppnum_t *physpage_p) +{ + kern_return_t kret; + + kret = vm_map_wire_nested(map, + start, + start + VM_MAP_PAGE_SIZE(map), + caller_prot, + vm_tag_bt(), + user_wire, + (pmap_t)NULL, + 0, + physpage_p); + if (kret != KERN_SUCCESS && + physpage_p != NULL) { + *physpage_p = 0; + } + return kret; +} - kret = vm_map_wire_nested(map, start, end, access_type, - user_wire, (pmap_t)NULL, 0); +kern_return_t +vm_map_wire_and_extract_kernel( + vm_map_t map, + vm_map_offset_t start, + vm_prot_t caller_prot, + vm_tag_t tag, + boolean_t user_wire, + ppnum_t *physpage_p) +{ + kern_return_t kret; + + kret = vm_map_wire_nested(map, + start, + start + VM_MAP_PAGE_SIZE(map), + caller_prot, + tag, + user_wire, + (pmap_t)NULL, + 0, + physpage_p); + if (kret != KERN_SUCCESS && + physpage_p != NULL) { + *physpage_p = 0; + } return kret; } @@ -4195,27 +6991,30 @@ vm_map_wire( */ static kern_return_t vm_map_unwire_nested( - register vm_map_t map, - register vm_map_offset_t start, - register vm_map_offset_t end, - boolean_t user_wire, - pmap_t map_pmap, - vm_map_offset_t pmap_addr) -{ - 
register vm_map_entry_t entry; - struct vm_map_entry *first_entry, tmp_entry; - boolean_t need_wakeup; - boolean_t main_map = FALSE; - unsigned int last_timestamp; + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t user_wire, + pmap_t map_pmap, + vm_map_offset_t pmap_addr) +{ + vm_map_entry_t entry; + struct vm_map_entry *first_entry, tmp_entry; + boolean_t need_wakeup; + boolean_t main_map = FALSE; + unsigned int last_timestamp; vm_map_lock(map); - if(map_pmap == NULL) + if (map_pmap == NULL) { main_map = TRUE; + } last_timestamp = map->timestamp; VM_MAP_RANGE_CHECK(map, start, end); assert(page_aligned(start)); assert(page_aligned(end)); + assert(VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map))); + assert(VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map))); if (start == end) { /* We unwired what the caller asked for: zero pages */ @@ -4229,14 +7028,13 @@ vm_map_unwire_nested( * vm_map_clip_start will be done later. * We don't want to unnest any nested sub maps here ! */ - } - else { + } else { if (!user_wire) { panic("vm_map_unwire: start not found"); } /* Start address is not in map. */ vm_map_unlock(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } if (entry->superpage_size) { @@ -4269,7 +7067,7 @@ vm_map_unwire_nested( * going on. * We should probably just wait and retry, * but then we have to be careful that this - * entry could get "simplified" after + * entry could get "simplified" after * "in_transition" gets unset and before * we re-lookup the entry, so we would * have to re-clip the entry to avoid @@ -4291,21 +7089,21 @@ vm_map_unwire_nested( } if (entry->is_sub_map) { - vm_map_offset_t sub_start; - vm_map_offset_t sub_end; - vm_map_offset_t local_end; - pmap_t pmap; - + vm_map_offset_t sub_start; + vm_map_offset_t sub_end; + vm_map_offset_t local_end; + pmap_t pmap; + vm_map_clip_start(map, entry, start); vm_map_clip_end(map, entry, end); - sub_start = entry->offset; + sub_start = VME_OFFSET(entry); sub_end = entry->vme_end - entry->vme_start; - sub_end += entry->offset; + sub_end += VME_OFFSET(entry); local_end = entry->vme_end; - if(map_pmap == NULL) { - if(entry->use_pmap) { - pmap = entry->object.sub_map->pmap; + if (map_pmap == NULL) { + if (entry->use_pmap) { + pmap = VME_SUBMAP(entry)->pmap; pmap_addr = sub_start; } else { pmap = map->pmap; @@ -4313,8 +7111,9 @@ vm_map_unwire_nested( } if (entry->wired_count == 0 || (user_wire && entry->user_wired_count == 0)) { - if (!user_wire) + if (!user_wire) { panic("vm_map_unwire: entry is unwired"); + } entry = entry->vme_next; continue; } @@ -4324,16 +7123,17 @@ vm_map_unwire_nested( * Holes: Next entry should be contiguous unless * this is the end of the region. */ - if (((entry->vme_end < end) && - ((entry->vme_next == vm_map_to_entry(map)) || - (entry->vme_next->vme_start - > entry->vme_end)))) { - if (!user_wire) + if (((entry->vme_end < end) && + ((entry->vme_next == vm_map_to_entry(map)) || + (entry->vme_next->vme_start + > entry->vme_end)))) { + if (!user_wire) { panic("vm_map_unwire: non-contiguous region"); + } /* - entry = entry->vme_next; - continue; -*/ + * entry = entry->vme_next; + * continue; + */ } subtract_wire_counts(map, entry, user_wire); @@ -4351,33 +7151,35 @@ vm_map_unwire_nested( * guarantees existance of the entry. 
*/ vm_map_unlock(map); - vm_map_unwire_nested(entry->object.sub_map, - sub_start, sub_end, user_wire, pmap, pmap_addr); + vm_map_unwire_nested(VME_SUBMAP(entry), + sub_start, sub_end, user_wire, pmap, pmap_addr); vm_map_lock(map); - if (last_timestamp+1 != map->timestamp) { + if (last_timestamp + 1 != map->timestamp) { /* - * Find the entry again. It could have been + * Find the entry again. It could have been * clipped or deleted after we unlocked the map. */ - if (!vm_map_lookup_entry(map, - tmp_entry.vme_start, - &first_entry)) { - if (!user_wire) + if (!vm_map_lookup_entry(map, + tmp_entry.vme_start, + &first_entry)) { + if (!user_wire) { panic("vm_map_unwire: re-lookup failed"); + } entry = first_entry->vme_next; - } else + } else { entry = first_entry; + } } last_timestamp = map->timestamp; /* * clear transition bit for all constituent entries - * that were in the original entry (saved in + * that were in the original entry (saved in * tmp_entry). Also check for waiters. */ while ((entry != vm_map_to_entry(map)) && - (entry->vme_start < tmp_entry.vme_end)) { + (entry->vme_start < tmp_entry.vme_end)) { assert(entry->in_transition); entry->in_transition = FALSE; if (entry->needs_wakeup) { @@ -4389,24 +7191,26 @@ vm_map_unwire_nested( continue; } else { vm_map_unlock(map); - vm_map_unwire_nested(entry->object.sub_map, - sub_start, sub_end, user_wire, map_pmap, - pmap_addr); + vm_map_unwire_nested(VME_SUBMAP(entry), + sub_start, sub_end, user_wire, map_pmap, + pmap_addr); vm_map_lock(map); - if (last_timestamp+1 != map->timestamp) { + if (last_timestamp + 1 != map->timestamp) { /* - * Find the entry again. It could have been + * Find the entry again. It could have been * clipped or deleted after we unlocked the map. */ - if (!vm_map_lookup_entry(map, - tmp_entry.vme_start, - &first_entry)) { - if (!user_wire) + if (!vm_map_lookup_entry(map, + tmp_entry.vme_start, + &first_entry)) { + if (!user_wire) { panic("vm_map_unwire: re-lookup failed"); + } entry = first_entry->vme_next; - } else + } else { entry = first_entry; + } } last_timestamp = map->timestamp; } @@ -4415,15 +7219,16 @@ vm_map_unwire_nested( if ((entry->wired_count == 0) || (user_wire && entry->user_wired_count == 0)) { - if (!user_wire) + if (!user_wire) { panic("vm_map_unwire: entry is unwired"); + } entry = entry->vme_next; continue; } - + assert(entry->wired_count > 0 && - (!user_wire || entry->user_wired_count > 0)); + (!user_wire || entry->user_wired_count > 0)); vm_map_clip_start(map, entry, start); vm_map_clip_end(map, entry, end); @@ -4433,12 +7238,12 @@ vm_map_unwire_nested( * Holes: Next entry should be contiguous unless * this is the end of the region. */ - if (((entry->vme_end < end) && - ((entry->vme_next == vm_map_to_entry(map)) || - (entry->vme_next->vme_start > entry->vme_end)))) { - - if (!user_wire) + if (((entry->vme_end < end) && + ((entry->vme_next == vm_map_to_entry(map)) || + (entry->vme_next->vme_start > entry->vme_end)))) { + if (!user_wire) { panic("vm_map_unwire: non-contiguous region"); + } entry = entry->vme_next; continue; } @@ -4450,40 +7255,42 @@ vm_map_unwire_nested( continue; } - if(entry->zero_wired_pages) { + if (entry->zero_wired_pages) { entry->zero_wired_pages = FALSE; } entry->in_transition = TRUE; - tmp_entry = *entry; /* see comment in vm_map_wire() */ + tmp_entry = *entry; /* see comment in vm_map_wire() */ /* * We can unlock the map now. The in_transition state * guarantees existance of the entry. 
*/ vm_map_unlock(map); - if(map_pmap) { - vm_fault_unwire(map, - &tmp_entry, FALSE, map_pmap, pmap_addr); + if (map_pmap) { + vm_fault_unwire(map, + &tmp_entry, FALSE, map_pmap, pmap_addr); } else { - vm_fault_unwire(map, - &tmp_entry, FALSE, map->pmap, - tmp_entry.vme_start); + vm_fault_unwire(map, + &tmp_entry, FALSE, map->pmap, + tmp_entry.vme_start); } vm_map_lock(map); - if (last_timestamp+1 != map->timestamp) { + if (last_timestamp + 1 != map->timestamp) { /* * Find the entry again. It could have been clipped * or deleted after we unlocked the map. */ if (!vm_map_lookup_entry(map, tmp_entry.vme_start, - &first_entry)) { - if (!user_wire) + &first_entry)) { + if (!user_wire) { panic("vm_map_unwire: re-lookup failed"); + } entry = first_entry->vme_next; - } else + } else { entry = first_entry; + } } last_timestamp = map->timestamp; @@ -4493,7 +7300,7 @@ vm_map_unwire_nested( * check for waiters. */ while ((entry != vm_map_to_entry(map)) && - (entry->vme_start < tmp_entry.vme_end)) { + (entry->vme_start < tmp_entry.vme_end)) { assert(entry->in_transition); entry->in_transition = FALSE; if (entry->needs_wakeup) { @@ -4518,21 +7325,21 @@ vm_map_unwire_nested( /* * wake up anybody waiting on entries that we have unwired. */ - if (need_wakeup) + if (need_wakeup) { vm_map_entry_wakeup(map); - return(KERN_SUCCESS); - + } + return KERN_SUCCESS; } kern_return_t vm_map_unwire( - register vm_map_t map, - register vm_map_offset_t start, - register vm_map_offset_t end, - boolean_t user_wire) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t user_wire) { - return vm_map_unwire_nested(map, start, end, - user_wire, (pmap_t)NULL, 0); + return vm_map_unwire_nested(map, start, end, + user_wire, (pmap_t)NULL, 0); } @@ -4540,30 +7347,34 @@ vm_map_unwire( * vm_map_entry_delete: [ internal use only ] * * Deallocate the given entry from the target map. - */ + */ static void vm_map_entry_delete( - register vm_map_t map, - register vm_map_entry_t entry) + vm_map_t map, + vm_map_entry_t entry) { - register vm_map_offset_t s, e; - register vm_object_t object; - register vm_map_t submap; + vm_map_offset_t s, e; + vm_object_t object; + vm_map_t submap; s = entry->vme_start; e = entry->vme_end; assert(page_aligned(s)); assert(page_aligned(e)); + if (entry->map_aligned == TRUE) { + assert(VM_MAP_PAGE_ALIGNED(s, VM_MAP_PAGE_MASK(map))); + assert(VM_MAP_PAGE_ALIGNED(e, VM_MAP_PAGE_MASK(map))); + } assert(entry->wired_count == 0); assert(entry->user_wired_count == 0); assert(!entry->permanent); if (entry->is_sub_map) { object = NULL; - submap = entry->object.sub_map; + submap = VME_SUBMAP(entry); } else { submap = NULL; - object = entry->object.vm_object; + object = VME_OBJECT(entry); } vm_map_store_entry_unlink(map, entry); @@ -4576,98 +7387,103 @@ vm_map_entry_delete( * Deallocate the object only after removing all * pmap entries pointing to its pages. 
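
vm_map_entry_delete() releases exactly one reference once the entry has been unlinked and its pmap mappings removed: the submap reference if the entry is a submap, otherwise the VM object reference. A tiny sketch of that dispatch with toy reference counts (the kernel's deallocation routines are only named in comments):

/*
 * Toy model of the reference drop at the end of entry deletion.
 */
#include <stdio.h>
#include <stdbool.h>

struct toy_submap { int refcnt; };
struct toy_object { int refcnt; };

struct toy_entry {
	bool is_sub_map;
	union {
		struct toy_submap *sub_map;
		struct toy_object *object;
	} u;
};

static void
toy_entry_delete(struct toy_entry *e)
{
	if (e->is_sub_map) {
		e->u.sub_map->refcnt--;   /* real code: vm_map_deallocate() */
	} else {
		e->u.object->refcnt--;    /* real code: vm_object_deallocate() */
	}
}

int
main(void)
{
	struct toy_object obj = { .refcnt = 2 };
	struct toy_entry e = { .is_sub_map = false, .u.object = &obj };

	toy_entry_delete(&e);
	printf("object refcnt now %d\n", obj.refcnt);   /* 1 */
	return 0;
}
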
*/ - if (submap) + if (submap) { vm_map_deallocate(submap); - else + } else { vm_object_deallocate(object); - + } } void vm_map_submap_pmap_clean( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_map_t sub_map, - vm_map_offset_t offset) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_map_t sub_map, + vm_map_offset_t offset) { - vm_map_offset_t submap_start; - vm_map_offset_t submap_end; - vm_map_size_t remove_size; - vm_map_entry_t entry; + vm_map_offset_t submap_start; + vm_map_offset_t submap_end; + vm_map_size_t remove_size; + vm_map_entry_t entry; submap_end = offset + (end - start); submap_start = offset; vm_map_lock_read(sub_map); - if(vm_map_lookup_entry(sub_map, offset, &entry)) { - + if (vm_map_lookup_entry(sub_map, offset, &entry)) { remove_size = (entry->vme_end - entry->vme_start); - if(offset > entry->vme_start) + if (offset > entry->vme_start) { remove_size -= offset - entry->vme_start; - + } + - if(submap_end < entry->vme_end) { + if (submap_end < entry->vme_end) { remove_size -= - entry->vme_end - submap_end; + entry->vme_end - submap_end; } - if(entry->is_sub_map) { + if (entry->is_sub_map) { vm_map_submap_pmap_clean( sub_map, start, start + remove_size, - entry->object.sub_map, - entry->offset); + VME_SUBMAP(entry), + VME_OFFSET(entry)); } else { - - if((map->mapped) && (map->ref_count) - && (entry->object.vm_object != NULL)) { - vm_object_pmap_protect( - entry->object.vm_object, - entry->offset+(offset-entry->vme_start), + if (map->mapped_in_other_pmaps && + os_ref_get_count(&map->map_refcnt) != 0 && + VME_OBJECT(entry) != NULL) { + vm_object_pmap_protect_options( + VME_OBJECT(entry), + (VME_OFFSET(entry) + + offset - + entry->vme_start), remove_size, PMAP_NULL, entry->vme_start, - VM_PROT_NONE); + VM_PROT_NONE, + PMAP_OPTIONS_REMOVE); } else { - pmap_remove(map->pmap, - (addr64_t)start, - (addr64_t)(start + remove_size)); + pmap_remove(map->pmap, + (addr64_t)start, + (addr64_t)(start + remove_size)); } } } entry = entry->vme_next; - while((entry != vm_map_to_entry(sub_map)) - && (entry->vme_start < submap_end)) { - remove_size = (entry->vme_end - entry->vme_start); - if(submap_end < entry->vme_end) { + while ((entry != vm_map_to_entry(sub_map)) + && (entry->vme_start < submap_end)) { + remove_size = (entry->vme_end - entry->vme_start); + if (submap_end < entry->vme_end) { remove_size -= entry->vme_end - submap_end; } - if(entry->is_sub_map) { + if (entry->is_sub_map) { vm_map_submap_pmap_clean( sub_map, (start + entry->vme_start) - offset, ((start + entry->vme_start) - offset) + remove_size, - entry->object.sub_map, - entry->offset); + VME_SUBMAP(entry), + VME_OFFSET(entry)); } else { - if((map->mapped) && (map->ref_count) - && (entry->object.vm_object != NULL)) { - vm_object_pmap_protect( - entry->object.vm_object, - entry->offset, + if (map->mapped_in_other_pmaps && + os_ref_get_count(&map->map_refcnt) != 0 && + VME_OBJECT(entry) != NULL) { + vm_object_pmap_protect_options( + VME_OBJECT(entry), + VME_OFFSET(entry), remove_size, PMAP_NULL, entry->vme_start, - VM_PROT_NONE); + VM_PROT_NONE, + PMAP_OPTIONS_REMOVE); } else { - pmap_remove(map->pmap, - (addr64_t)((start + entry->vme_start) - - offset), - (addr64_t)(((start + entry->vme_start) - - offset) + remove_size)); + pmap_remove(map->pmap, + (addr64_t)((start + entry->vme_start) + - offset), + (addr64_t)(((start + entry->vme_start) + - offset) + remove_size)); } } entry = entry->vme_next; @@ -4676,6 +7492,95 @@ vm_map_submap_pmap_clean( return; } +/* + * virt_memory_guard_ast: 
+ * + * Handle the AST callout for a virtual memory guard. + * raise an EXC_GUARD exception and terminate the task + * if configured to do so. + */ +void +virt_memory_guard_ast( + thread_t thread, + mach_exception_data_type_t code, + mach_exception_data_type_t subcode) +{ + task_t task = thread->task; + assert(task != kernel_task); + assert(task == current_task()); + uint32_t behavior; + + behavior = task->task_exc_guard; + + /* Is delivery enabled */ + if ((behavior & TASK_EXC_GUARD_VM_DELIVER) == 0) { + return; + } + + /* If only once, make sure we're that once */ + while (behavior & TASK_EXC_GUARD_VM_ONCE) { + uint32_t new_behavior = behavior & ~TASK_EXC_GUARD_VM_DELIVER; + + if (OSCompareAndSwap(behavior, new_behavior, &task->task_exc_guard)) { + break; + } + behavior = task->task_exc_guard; + if ((behavior & TASK_EXC_GUARD_VM_DELIVER) == 0) { + return; + } + } + + /* Raise exception via corpse fork or synchronously */ + if ((task->task_exc_guard & TASK_EXC_GUARD_VM_CORPSE) && + (task->task_exc_guard & TASK_EXC_GUARD_VM_FATAL) == 0) { + task_violated_guard(code, subcode, NULL); + } else { + task_exception_notify(EXC_GUARD, code, subcode); + } + + /* Terminate the task if desired */ + if (task->task_exc_guard & TASK_EXC_GUARD_VM_FATAL) { + task_bsdtask_kill(current_task()); + } +} + +/* + * vm_map_guard_exception: + * + * Generate a GUARD_TYPE_VIRTUAL_MEMORY EXC_GUARD exception. + * + * Right now, we do this when we find nothing mapped, or a + * gap in the mapping when a user address space deallocate + * was requested. We report the address of the first gap found. + */ +static void +vm_map_guard_exception( + vm_map_offset_t gap_start, + unsigned reason) +{ + mach_exception_code_t code = 0; + unsigned int guard_type = GUARD_TYPE_VIRT_MEMORY; + unsigned int target = 0; /* should we pass in pid associated with map? 
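
When TASK_EXC_GUARD_VM_ONCE is set, virt_memory_guard_ast() uses a compare-and-swap loop to atomically clear the DELIVER bit, so that only one of several racing threads actually delivers the guard exception. The sketch below reproduces that "deliver at most once" pattern with C11 atomics in place of OSCompareAndSwap; the flag names and values are stand-ins with the same roles as the kernel's bits.

/*
 * Toy model of the deliver-once claim in virt_memory_guard_ast().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define GUARD_DELIVER 0x1u
#define GUARD_ONCE    0x2u

static bool
try_claim_delivery(_Atomic unsigned int *behavior_p)
{
	unsigned int behavior = atomic_load(behavior_p);

	if ((behavior & GUARD_DELIVER) == 0) {
		return false;           /* delivery disabled */
	}
	while (behavior & GUARD_ONCE) {
		unsigned int cleared = behavior & ~GUARD_DELIVER;

		/* try to be the thread that turns delivery off */
		if (atomic_compare_exchange_weak(behavior_p, &behavior, cleared)) {
			return true;
		}
		/* lost the race: "behavior" now holds the fresh value */
		if ((behavior & GUARD_DELIVER) == 0) {
			return false;
		}
	}
	return true;                    /* ONCE not set: deliver every time */
}

int
main(void)
{
	_Atomic unsigned int behavior = GUARD_DELIVER | GUARD_ONCE;

	printf("first:  %d\n", try_claim_delivery(&behavior));  /* 1 */
	printf("second: %d\n", try_claim_delivery(&behavior));  /* 0 */
	return 0;
}
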
*/ + mach_exception_data_type_t subcode = (uint64_t)gap_start; + boolean_t fatal = FALSE; + + task_t task = current_task(); + + /* Can't deliver exceptions to kernel task */ + if (task == kernel_task) { + return; + } + + EXC_GUARD_ENCODE_TYPE(code, guard_type); + EXC_GUARD_ENCODE_FLAVOR(code, reason); + EXC_GUARD_ENCODE_TARGET(code, target); + + if (task->task_exc_guard & TASK_EXC_GUARD_VM_FATAL) { + fatal = TRUE; + } + thread_guard_violation(current_thread(), code, subcode, fatal); +} + /* * vm_map_delete: [ internal use only ] * @@ -4689,22 +7594,33 @@ vm_map_submap_pmap_clean( */ static kern_return_t vm_map_delete( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - int flags, - vm_map_t zap_map) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + int flags, + vm_map_t zap_map) { - vm_map_entry_t entry, next; - struct vm_map_entry *first_entry, tmp_entry; - register vm_map_offset_t s; - register vm_object_t object; - boolean_t need_wakeup; - unsigned int last_timestamp = ~0; /* unlikely value */ - int interruptible; + vm_map_entry_t entry, next; + struct vm_map_entry *first_entry, tmp_entry; + vm_map_offset_t s; + vm_object_t object; + boolean_t need_wakeup; + unsigned int last_timestamp = ~0; /* unlikely value */ + int interruptible; + vm_map_offset_t gap_start; + __unused vm_map_offset_t save_start = start; + __unused vm_map_offset_t save_end = end; + const vm_map_offset_t FIND_GAP = 1; /* a not page aligned value */ + const vm_map_offset_t GAPS_OK = 2; /* a different not page aligned value */ + + if (map != kernel_map && !(flags & VM_MAP_REMOVE_GAPS_OK) && !map->terminated) { + gap_start = FIND_GAP; + } else { + gap_start = GAPS_OK; + } - interruptible = (flags & VM_MAP_REMOVE_INTERRUPTIBLE) ? - THREAD_ABORTSAFE : THREAD_UNINT; + interruptible = (flags & VM_MAP_REMOVE_INTERRUPTIBLE) ? + THREAD_ABORTSAFE : THREAD_UNINT; /* * All our DMA I/O operations in IOKit are currently done by @@ -4719,22 +7635,60 @@ vm_map_delete( */ flags |= VM_MAP_REMOVE_WAIT_FOR_KWIRE; - while(1) { + while (1) { /* * Find the start of the region, and clip it */ if (vm_map_lookup_entry(map, start, &first_entry)) { entry = first_entry; - if (entry->superpage_size && (start & ~SUPERPAGE_MASK)) { /* extend request to whole entry */ start = SUPERPAGE_ROUND_DOWN(start); + if (map == kalloc_map && + (entry->vme_start != start || + entry->vme_end != end)) { + panic("vm_map_delete(%p,0x%llx,0x%llx): " + "mismatched entry %p [0x%llx:0x%llx]\n", + map, + (uint64_t)start, + (uint64_t)end, + entry, + (uint64_t)entry->vme_start, + (uint64_t)entry->vme_end); + } + + /* + * If in a superpage, extend the range to include the start of the mapping. + */ + if (entry->superpage_size && (start & ~SUPERPAGE_MASK)) { start = SUPERPAGE_ROUND_DOWN(start); continue; } + if (start == entry->vme_start) { /* * No need to clip. We don't want to cause * any unnecessary unnesting in this case... */ } else { + if ((flags & VM_MAP_REMOVE_NO_MAP_ALIGN) && + entry->map_aligned && + !VM_MAP_PAGE_ALIGNED( + start, + VM_MAP_PAGE_MASK(map))) { + /* + * The entry will no longer be + * map-aligned after clipping + * and the caller said it's OK. 
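
vm_map_delete() tracks the first gap it finds in gap_start, and uses FIND_GAP (1) and GAPS_OK (2) as sentinels: because both values are not page aligned, they can never collide with a genuine gap address, which always is. A small sketch of why that works (the constants here are stand-ins with an _X suffix):

/*
 * Toy model of the gap_start sentinel values in vm_map_delete().
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_MASK_X   0xFFFULL          /* 4K pages in this sketch */
#define FIND_GAP_X    ((uint64_t)1)     /* still looking for a gap */
#define GAPS_OK_X     ((uint64_t)2)     /* caller doesn't care about gaps */

static bool
found_a_gap(uint64_t gap_start)
{
	/* any page-aligned value stored here is a real gap address */
	return gap_start != FIND_GAP_X && gap_start != GAPS_OK_X;
}

int
main(void)
{
	uint64_t gap_start = FIND_GAP_X;
	printf("initially: %d\n", found_a_gap(gap_start));              /* 0 */

	gap_start = 0x7f0000000000ULL;                                  /* page aligned */
	printf("real gap:  %d (aligned=%d)\n", found_a_gap(gap_start),
	    (int)((gap_start & PAGE_MASK_X) == 0));                     /* 1, 1 */
	return 0;
}
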
+ */ + entry->map_aligned = FALSE; + } + if (map == kalloc_map) { + panic("vm_map_delete(%p,0x%llx,0x%llx):" + " clipping %p at 0x%llx\n", + map, + (uint64_t)start, + (uint64_t)end, + entry, + (uint64_t)start); + } vm_map_clip_start(map, entry, start); } @@ -4744,12 +7698,25 @@ vm_map_delete( */ SAVE_HINT_MAP_WRITE(map, entry->vme_prev); } else { + if (map->pmap == kernel_pmap && + os_ref_get_count(&map->map_refcnt) != 0) { + panic("vm_map_delete(%p,0x%llx,0x%llx): " + "no map entry at 0x%llx\n", + map, + (uint64_t)start, + (uint64_t)end, + (uint64_t)start); + } entry = first_entry->vme_next; + if (gap_start == FIND_GAP) { + gap_start = start; + } } break; } - if (entry->superpage_size) + if (entry->superpage_size) { end = SUPERPAGE_ROUND_UP(end); + } need_wakeup = FALSE; /* @@ -4780,6 +7747,25 @@ vm_map_delete( * vm_map_simplify_entry(). We need to * re-clip its start. */ + if ((flags & VM_MAP_REMOVE_NO_MAP_ALIGN) && + entry->map_aligned && + !VM_MAP_PAGE_ALIGNED(s, + VM_MAP_PAGE_MASK(map))) { + /* + * The entry will no longer be map-aligned + * after clipping and the caller said it's OK. + */ + entry->map_aligned = FALSE; + } + if (map == kalloc_map) { + panic("vm_map_delete(%p,0x%llx,0x%llx): " + "clipping %p at 0x%llx\n", + map, + (uint64_t)start, + (uint64_t)end, + entry, + (uint64_t)s); + } vm_map_clip_start(map, entry, s); } if (entry->vme_end <= end) { @@ -4788,13 +7774,90 @@ vm_map_delete( * to clip and possibly cause an unnecessary unnesting. */ } else { + if ((flags & VM_MAP_REMOVE_NO_MAP_ALIGN) && + entry->map_aligned && + !VM_MAP_PAGE_ALIGNED(end, + VM_MAP_PAGE_MASK(map))) { + /* + * The entry will no longer be map-aligned + * after clipping and the caller said it's OK. + */ + entry->map_aligned = FALSE; + } + if (map == kalloc_map) { + panic("vm_map_delete(%p,0x%llx,0x%llx): " + "clipping %p at 0x%llx\n", + map, + (uint64_t)start, + (uint64_t)end, + entry, + (uint64_t)end); + } vm_map_clip_end(map, entry, end); } if (entry->permanent) { - panic("attempt to remove permanent VM map entry " - "%p [0x%llx:0x%llx]\n", - entry, (uint64_t) s, (uint64_t) end); + if (map->pmap == kernel_pmap) { + panic("%s(%p,0x%llx,0x%llx): " + "attempt to remove permanent " + "VM map entry " + "%p [0x%llx:0x%llx]\n", + __FUNCTION__, + map, + (uint64_t) start, + (uint64_t) end, + entry, + (uint64_t) entry->vme_start, + (uint64_t) entry->vme_end); + } else if (flags & VM_MAP_REMOVE_IMMUTABLE) { +// printf("FBDP %d[%s] removing permanent entry %p [0x%llx:0x%llx] prot 0x%x/0x%x\n", proc_selfpid(), (current_task()->bsd_info ? proc_name_address(current_task()->bsd_info) : "?"), entry, (uint64_t)entry->vme_start, (uint64_t)entry->vme_end, entry->protection, entry->max_protection); + entry->permanent = FALSE; +#if PMAP_CS + } else if ((entry->protection & VM_PROT_EXECUTE) && !pmap_cs_enforced(map->pmap)) { + entry->permanent = FALSE; + + printf("%d[%s] %s(0x%llx,0x%llx): " + "pmap_cs disabled, allowing for permanent executable entry [0x%llx:0x%llx] " + "prot 0x%x/0x%x\n", + proc_selfpid(), + (current_task()->bsd_info + ? proc_name_address(current_task()->bsd_info) + : "?"), + __FUNCTION__, + (uint64_t) start, + (uint64_t) end, + (uint64_t)entry->vme_start, + (uint64_t)entry->vme_end, + entry->protection, + entry->max_protection); +#endif + } else { + if (vm_map_executable_immutable_verbose) { + printf("%d[%s] %s(0x%llx,0x%llx): " + "permanent entry [0x%llx:0x%llx] " + "prot 0x%x/0x%x\n", + proc_selfpid(), + (current_task()->bsd_info + ? 
proc_name_address(current_task()->bsd_info) + : "?"), + __FUNCTION__, + (uint64_t) start, + (uint64_t) end, + (uint64_t)entry->vme_start, + (uint64_t)entry->vme_end, + entry->protection, + entry->max_protection); + } + /* + * dtrace -n 'vm_map_delete_permanent { print("start=0x%llx end=0x%llx prot=0x%x/0x%x\n", arg0, arg1, arg2, arg3); stack(); ustack(); }' + */ + DTRACE_VM5(vm_map_delete_permanent, + vm_map_offset_t, entry->vme_start, + vm_map_offset_t, entry->vme_end, + vm_prot_t, entry->protection, + vm_prot_t, entry->max_protection, + int, VME_ALIAS(entry)); + } } @@ -4825,7 +7888,6 @@ vm_map_delete( * We do not clear the needs_wakeup flag, * since we cannot tell if we were the only one. */ - vm_map_unlock(map); return KERN_ABORTED; } @@ -4834,11 +7896,12 @@ vm_map_delete( * may not exist anymore. Look it up again. */ if (!vm_map_lookup_entry(map, s, &first_entry)) { - assert((map != kernel_map) && - (!entry->is_sub_map)); /* * User: use the next entry */ + if (gap_start == FIND_GAP) { + gap_start = s; + } entry = first_entry->vme_next; s = entry->vme_start; } else { @@ -4850,23 +7913,24 @@ vm_map_delete( } /* end in_transition */ if (entry->wired_count) { - boolean_t user_wire; + boolean_t user_wire; user_wire = entry->user_wired_count > 0; /* - * Remove a kernel wiring if requested + * Remove a kernel wiring if requested */ if (flags & VM_MAP_REMOVE_KUNWIRE) { entry->wired_count--; } - + /* * Remove all user wirings for proper accounting */ if (entry->user_wired_count > 0) { - while (entry->user_wired_count) + while (entry->user_wired_count) { subtract_wire_counts(map, entry, user_wire); + } } if (entry->wired_count != 0) { @@ -4884,17 +7948,16 @@ vm_map_delete( assert(s == entry->vme_start); entry->needs_wakeup = TRUE; wait_result = vm_map_entry_wait(map, - interruptible); + interruptible); if (interruptible && wait_result == THREAD_INTERRUPTED) { /* - * We do not clear the - * needs_wakeup flag, since we - * cannot tell if we were the + * We do not clear the + * needs_wakeup flag, since we + * cannot tell if we were the * only one. */ - vm_map_unlock(map); return KERN_ABORTED; } @@ -4903,12 +7966,15 @@ vm_map_delete( * it may not exist anymore. Look it * up again. 
*/ - if (!vm_map_lookup_entry(map, s, - &first_entry)) { + if (!vm_map_lookup_entry(map, s, + &first_entry)) { assert(map != kernel_map); /* * User: use the next entry */ + if (gap_start == FIND_GAP) { + gap_start = s; + } entry = first_entry->vme_next; s = entry->vme_start; } else { @@ -4917,8 +7983,7 @@ vm_map_delete( } last_timestamp = map->timestamp; continue; - } - else { + } else { return KERN_FAILURE; } } @@ -4941,12 +8006,12 @@ vm_map_delete( vm_map_offset_t sub_start, sub_end; pmap_t pmap; vm_map_offset_t pmap_addr; - - sub_map = tmp_entry.object.sub_map; - sub_start = tmp_entry.offset; + + sub_map = VME_SUBMAP(&tmp_entry); + sub_start = VME_OFFSET(&tmp_entry); sub_end = sub_start + (tmp_entry.vme_end - - tmp_entry.vme_start); + tmp_entry.vme_start); if (tmp_entry.use_pmap) { pmap = sub_map->pmap; pmap_addr = tmp_entry.vme_start; @@ -4955,26 +8020,37 @@ vm_map_delete( pmap_addr = tmp_entry.vme_start; } (void) vm_map_unwire_nested(sub_map, - sub_start, sub_end, - user_wire, - pmap, pmap_addr); + sub_start, sub_end, + user_wire, + pmap, pmap_addr); } else { - + if (VME_OBJECT(&tmp_entry) == kernel_object) { + pmap_protect_options( + map->pmap, + tmp_entry.vme_start, + tmp_entry.vme_end, + VM_PROT_NONE, + PMAP_OPTIONS_REMOVE, + NULL); + } vm_fault_unwire(map, &tmp_entry, - tmp_entry.object.vm_object == kernel_object, - map->pmap, tmp_entry.vme_start); + VME_OBJECT(&tmp_entry) == kernel_object, + map->pmap, tmp_entry.vme_start); } vm_map_lock(map); - if (last_timestamp+1 != map->timestamp) { + if (last_timestamp + 1 != map->timestamp) { /* * Find the entry again. It could have * been clipped after we unlocked the map. */ - if (!vm_map_lookup_entry(map, s, &first_entry)){ - assert((map != kernel_map) && - (!entry->is_sub_map)); + if (!vm_map_lookup_entry(map, s, &first_entry)) { + assert((map != kernel_map) && + (!entry->is_sub_map)); + if (gap_start == FIND_GAP) { + gap_start = s; + } first_entry = first_entry->vme_next; s = first_entry->vme_start; } else { @@ -4989,7 +8065,7 @@ vm_map_delete( entry = first_entry; while ((entry != vm_map_to_entry(map)) && - (entry->vme_start < tmp_entry.vme_end)) { + (entry->vme_start < tmp_entry.vme_end)) { assert(entry->in_transition); entry->in_transition = FALSE; if (entry->needs_wakeup) { @@ -5026,53 +8102,146 @@ vm_map_delete( } else if (entry->is_sub_map) { if (entry->use_pmap) { #ifndef NO_NESTED_PMAP - pmap_unnest(map->pmap, - (addr64_t)entry->vme_start, - entry->vme_end - entry->vme_start); -#endif /* NO_NESTED_PMAP */ - if ((map->mapped) && (map->ref_count)) { + int pmap_flags; + + if (flags & VM_MAP_REMOVE_NO_UNNESTING) { + /* + * This is the final cleanup of the + * address space being terminated. + * No new mappings are expected and + * we don't really need to unnest the + * shared region (and lose the "global" + * pmap mappings, if applicable). + * + * Tell the pmap layer that we're + * "clean" wrt nesting. + */ + pmap_flags = PMAP_UNNEST_CLEAN; + } else { + /* + * We're unmapping part of the nested + * shared region, so we can't keep the + * nested pmap. 
+ */ + pmap_flags = 0; + } + pmap_unnest_options( + map->pmap, + (addr64_t)entry->vme_start, + entry->vme_end - entry->vme_start, + pmap_flags); +#endif /* NO_NESTED_PMAP */ + if (map->mapped_in_other_pmaps && + os_ref_get_count(&map->map_refcnt) != 0) { /* clean up parent map/maps */ vm_map_submap_pmap_clean( map, entry->vme_start, entry->vme_end, - entry->object.sub_map, - entry->offset); + VME_SUBMAP(entry), + VME_OFFSET(entry)); } } else { vm_map_submap_pmap_clean( map, entry->vme_start, entry->vme_end, - entry->object.sub_map, - entry->offset); + VME_SUBMAP(entry), + VME_OFFSET(entry)); } - } else if (entry->object.vm_object != kernel_object) { - object = entry->object.vm_object; - if((map->mapped) && (map->ref_count)) { - vm_object_pmap_protect( - object, entry->offset, + } else if (VME_OBJECT(entry) != kernel_object && + VME_OBJECT(entry) != compressor_object) { + object = VME_OBJECT(entry); + if (map->mapped_in_other_pmaps && + os_ref_get_count(&map->map_refcnt) != 0) { + vm_object_pmap_protect_options( + object, VME_OFFSET(entry), entry->vme_end - entry->vme_start, PMAP_NULL, entry->vme_start, - VM_PROT_NONE); - } else { - pmap_remove(map->pmap, - (addr64_t)entry->vme_start, - (addr64_t)entry->vme_end); + VM_PROT_NONE, + PMAP_OPTIONS_REMOVE); + } else if ((VME_OBJECT(entry) != VM_OBJECT_NULL) || + (map->pmap == kernel_pmap)) { + /* Remove translations associated + * with this range unless the entry + * does not have an object, or + * it's the kernel map or a descendant + * since the platform could potentially + * create "backdoor" mappings invisible + * to the VM. It is expected that + * objectless, non-kernel ranges + * do not have such VM invisible + * translations. + */ + pmap_remove_options(map->pmap, + (addr64_t)entry->vme_start, + (addr64_t)entry->vme_end, + PMAP_OPTIONS_REMOVE); } } + if (entry->iokit_acct) { + /* alternate accounting */ + DTRACE_VM4(vm_map_iokit_unmapped_region, + vm_map_t, map, + vm_map_offset_t, entry->vme_start, + vm_map_offset_t, entry->vme_end, + int, VME_ALIAS(entry)); + vm_map_iokit_unmapped_region(map, + (entry->vme_end - + entry->vme_start)); + entry->iokit_acct = FALSE; + entry->use_pmap = FALSE; + } + /* * All pmap mappings for this map entry must have been * cleared by now. */ +#if DEBUG assert(vm_map_pmap_is_empty(map, - entry->vme_start, - entry->vme_end)); + entry->vme_start, + entry->vme_end)); +#endif /* DEBUG */ next = entry->vme_next; + + if (map->pmap == kernel_pmap && + os_ref_get_count(&map->map_refcnt) != 0 && + entry->vme_end < end && + (next == vm_map_to_entry(map) || + next->vme_start != entry->vme_end)) { + panic("vm_map_delete(%p,0x%llx,0x%llx): " + "hole after %p at 0x%llx\n", + map, + (uint64_t)start, + (uint64_t)end, + entry, + (uint64_t)entry->vme_end); + } + + /* + * If the desired range didn't end with "entry", then there is a gap if + * we wrapped around to the start of the map or if "entry" and "next" + * aren't contiguous. + * + * The vm_map_round_page() is needed since an entry can be less than VM_MAP_PAGE_MASK() sized. + * For example, devices which have h/w 4K pages, but entry sizes are all now 16K. 
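
The gap test above rounds entry->vme_end up to the map's page mask precisely because an entry can be smaller than the map's nominal page size (for example 4K hardware pages under a 16K map); without the rounding, the short tail of such an entry would be misreported as a gap. A compilable sketch of that rounded check, with toy values:

/*
 * Toy model of the rounded gap test in vm_map_delete().
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static uint64_t
round_page(uint64_t addr, uint64_t page_mask)
{
	return (addr + page_mask) & ~page_mask;
}

static bool
gap_after_entry(uint64_t entry_end, uint64_t next_start, uint64_t range_end,
    uint64_t map_page_mask)
{
	if (round_page(entry_end, map_page_mask) >= range_end) {
		return false;   /* entry effectively reaches the end of the range */
	}
	return entry_end != next_start;   /* next entry not contiguous: a gap */
}

int
main(void)
{
	uint64_t mask16k = 0x3FFF;      /* 16K map pages */

	/* 4K-sized entry ending at 0x5000; rounding reaches the range end */
	printf("short tail entry: %d\n",
	    gap_after_entry(0x5000, 0x8000, 0x8000, mask16k));   /* 0 */

	/* genuine hole before the next entry */
	printf("real hole:        %d\n",
	    gap_after_entry(0x4000, 0xC000, 0x20000, mask16k));  /* 1 */
	return 0;
}
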
+ */ + if (gap_start == FIND_GAP && + vm_map_round_page(entry->vme_end, VM_MAP_PAGE_MASK(map)) < end && + (next == vm_map_to_entry(map) || entry->vme_end != next->vme_start)) { + gap_start = entry->vme_end; + } s = next->vme_start; last_timestamp = map->timestamp; - if ((flags & VM_MAP_REMOVE_SAVE_ENTRIES) && + if (entry->permanent) { + /* + * A permanent entry can not be removed, so leave it + * in place but remove all access permissions. + */ + entry->protection = VM_PROT_NONE; + entry->max_protection = VM_PROT_NONE; + } else if ((flags & VM_MAP_REMOVE_SAVE_ENTRIES) && zap_map != VM_MAP_NULL) { vm_map_size_t entry_size; /* @@ -5084,8 +8253,9 @@ vm_map_delete( vm_map_store_entry_unlink(map, entry); /* ... and add it to the end of the "zap_map" */ vm_map_store_entry_link(zap_map, - vm_map_last_entry(zap_map), - entry); + vm_map_last_entry(zap_map), + entry, + VM_MAP_KERNEL_FLAGS_NONE); entry_size = entry->vme_end - entry->vme_start; map->size -= entry_size; zap_map->size += entry_size; @@ -5099,46 +8269,93 @@ vm_map_delete( entry = next; - if(entry == vm_map_to_entry(map)) { + if (entry == vm_map_to_entry(map)) { break; } - if (last_timestamp+1 != map->timestamp) { + if (last_timestamp + 1 != map->timestamp) { /* - * we are responsible for deleting everything - * from the give space, if someone has interfered - * we pick up where we left off, back fills should - * be all right for anyone except map_delete and + * We are responsible for deleting everything + * from the given space. If someone has interfered, + * we pick up where we left off. Back fills should + * be all right for anyone, except map_delete, and * we have to assume that the task has been fully * disabled before we get here */ - if (!vm_map_lookup_entry(map, s, &entry)){ - entry = entry->vme_next; + if (!vm_map_lookup_entry(map, s, &entry)) { + entry = entry->vme_next; + + /* + * Nothing found for s. If we weren't already done, then there is a gap. + */ + if (gap_start == FIND_GAP && s < end) { + gap_start = s; + } s = entry->vme_start; - } else { + } else { SAVE_HINT_MAP_WRITE(map, entry->vme_prev); - } - /* - * others can not only allocate behind us, we can - * also see coalesce while we don't have the map lock + } + /* + * others can not only allocate behind us, we can + * also see coalesce while we don't have the map lock */ - if(entry == vm_map_to_entry(map)) { + if (entry == vm_map_to_entry(map)) { break; } } last_timestamp = map->timestamp; } - if (map->wait_for_space) + if (map->wait_for_space) { thread_wakeup((event_t) map); + } /* * wake up anybody waiting on entries that we have already deleted. */ - if (need_wakeup) + if (need_wakeup) { vm_map_entry_wakeup(map); + } + + if (gap_start != FIND_GAP && gap_start != GAPS_OK) { + DTRACE_VM3(kern_vm_deallocate_gap, + vm_map_offset_t, gap_start, + vm_map_offset_t, save_start, + vm_map_offset_t, save_end); + if (!(flags & VM_MAP_REMOVE_GAPS_OK)) { + vm_map_guard_exception(gap_start, kGUARD_EXC_DEALLOC_GAP); + } + } return KERN_SUCCESS; } + +/* + * vm_map_terminate: + * + * Clean out a task's map. 
+ */ +kern_return_t +vm_map_terminate( + vm_map_t map) +{ + vm_map_lock(map); + map->terminated = TRUE; + vm_map_unlock(map); + + return vm_map_remove(map, + map->min_offset, + map->max_offset, + /* + * Final cleanup: + * + no unnesting + * + remove immutable mappings + * + allow gaps in range + */ + (VM_MAP_REMOVE_NO_UNNESTING | + VM_MAP_REMOVE_IMMUTABLE | + VM_MAP_REMOVE_GAPS_OK)); +} + /* * vm_map_remove: * @@ -5147,22 +8364,71 @@ vm_map_delete( */ kern_return_t vm_map_remove( - register vm_map_t map, - register vm_map_offset_t start, - register vm_map_offset_t end, - register boolean_t flags) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t flags) { - register kern_return_t result; + kern_return_t result; vm_map_lock(map); VM_MAP_RANGE_CHECK(map, start, end); + /* + * For the zone_map, the kernel controls the allocation/freeing of memory. + * Any free to the zone_map should be within the bounds of the map and + * should free up memory. If the VM_MAP_RANGE_CHECK() silently converts a + * free to the zone_map into a no-op, there is a problem and we should + * panic. + */ + if ((map == zone_map) && (start == end)) { + panic("Nothing being freed to the zone_map. start = end = %p\n", (void *)start); + } result = vm_map_delete(map, start, end, flags, VM_MAP_NULL); vm_map_unlock(map); - return(result); + return result; +} + +/* + * vm_map_remove_locked: + * + * Remove the given address range from the target locked map. + * This is the exported form of vm_map_delete. + */ +kern_return_t +vm_map_remove_locked( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t flags) +{ + kern_return_t result; + + VM_MAP_RANGE_CHECK(map, start, end); + result = vm_map_delete(map, start, end, flags, VM_MAP_NULL); + return result; } +/* + * Routine: vm_map_copy_allocate + * + * Description: + * Allocates and initializes a map copy object. + */ +static vm_map_copy_t +vm_map_copy_allocate(void) +{ + vm_map_copy_t new_copy; + + new_copy = zalloc(vm_map_copy_zone); + bzero(new_copy, sizeof(*new_copy)); + new_copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE; + vm_map_copy_first_entry(new_copy) = vm_map_copy_to_entry(new_copy); + vm_map_copy_last_entry(new_copy) = vm_map_copy_to_entry(new_copy); + return new_copy; +} + /* * Routine: vm_map_copy_discard * @@ -5172,23 +8438,28 @@ vm_map_remove( */ void vm_map_copy_discard( - vm_map_copy_t copy) + vm_map_copy_t copy) { - if (copy == VM_MAP_COPY_NULL) + if (copy == VM_MAP_COPY_NULL) { return; + } switch (copy->type) { case VM_MAP_COPY_ENTRY_LIST: while (vm_map_copy_first_entry(copy) != - vm_map_copy_to_entry(copy)) { - vm_map_entry_t entry = vm_map_copy_first_entry(copy); + vm_map_copy_to_entry(copy)) { + vm_map_entry_t entry = vm_map_copy_first_entry(copy); vm_map_copy_entry_unlink(copy, entry); - vm_object_deallocate(entry->object.vm_object); + if (entry->is_sub_map) { + vm_map_deallocate(VME_SUBMAP(entry)); + } else { + vm_object_deallocate(VME_OBJECT(entry)); + } vm_map_copy_entry_dispose(copy, entry); } break; - case VM_MAP_COPY_OBJECT: + case VM_MAP_COPY_OBJECT: vm_object_deallocate(copy->cpy_object); break; case VM_MAP_COPY_KERNEL_BUFFER: @@ -5198,7 +8469,11 @@ vm_map_copy_discard( * allocated by a single call to kalloc(), i.e. the * vm_map_copy_t was not allocated out of the zone. 
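/*
 * vm_map_copy_allocate() above initializes the copy's entry list as a
 * circular doubly-linked list whose header doubles as the sentinel, so
 * "first entry == header" is the emptiness test used throughout this
 * file.  The toy list below sketches just that invariant; the types
 * and helper names are hypothetical.
 */
#include <assert.h>
#include <stdbool.h>

struct node {
	struct node *next;
	struct node *prev;
};

struct copy_list {
	struct node links;      /* the header itself is the sentinel node */
};

#define list_sentinel(cl)  (&(cl)->links)

static void
copy_list_init(struct copy_list *cl)
{
	/* an empty list points back at its own header */
	cl->links.next = list_sentinel(cl);
	cl->links.prev = list_sentinel(cl);
}

static bool
copy_list_empty(struct copy_list *cl)
{
	return cl->links.next == list_sentinel(cl);
}

static void
copy_list_append(struct copy_list *cl, struct node *n)
{
	n->prev = cl->links.prev;
	n->next = list_sentinel(cl);
	cl->links.prev->next = n;
	cl->links.prev = n;
}

int
main(void)
{
	struct copy_list cl;
	struct node a;

	copy_list_init(&cl);
	assert(copy_list_empty(&cl));

	copy_list_append(&cl, &a);
	assert(!copy_list_empty(&cl));
	assert(cl.links.next == &a && cl.links.prev == &a);
	return 0;
}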
*/ - kfree(copy, copy->cpy_kalloc_size); + if (copy->size > msg_ool_size_small || copy->offset) { + panic("Invalid vm_map_copy_t sz:%lld, ofst:%lld", + (long long)copy->size, (long long)copy->offset); + } + kfree(copy, copy->size + cpy_kdata_hdr_sz); return; } zfree(vm_map_copy_zone, copy); @@ -5223,12 +8498,13 @@ vm_map_copy_discard( */ vm_map_copy_t vm_map_copy_copy( - vm_map_copy_t copy) + vm_map_copy_t copy) { - vm_map_copy_t new_copy; + vm_map_copy_t new_copy; - if (copy == VM_MAP_COPY_NULL) + if (copy == VM_MAP_COPY_NULL) { return VM_MAP_COPY_NULL; + } /* * Allocate a new copy object, and copy the information @@ -5244,9 +8520,9 @@ vm_map_copy_copy( * changed to point to the new copy object. */ vm_map_copy_first_entry(copy)->vme_prev - = vm_map_copy_to_entry(new_copy); + = vm_map_copy_to_entry(new_copy); vm_map_copy_last_entry(copy)->vme_next - = vm_map_copy_to_entry(new_copy); + = vm_map_copy_to_entry(new_copy); } /* @@ -5264,15 +8540,15 @@ vm_map_copy_copy( static kern_return_t vm_map_overwrite_submap_recurse( - vm_map_t dst_map, - vm_map_offset_t dst_addr, - vm_map_size_t dst_size) + vm_map_t dst_map, + vm_map_offset_t dst_addr, + vm_map_size_t dst_size) { - vm_map_offset_t dst_end; - vm_map_entry_t tmp_entry; - vm_map_entry_t entry; - kern_return_t result; - boolean_t encountered_sub_map = FALSE; + vm_map_offset_t dst_end; + vm_map_entry_t tmp_entry; + vm_map_entry_t entry; + kern_return_t result; + boolean_t encountered_sub_map = FALSE; @@ -5283,71 +8559,81 @@ vm_map_overwrite_submap_recurse( * splitting entries in strange ways. */ - dst_end = vm_map_round_page(dst_addr + dst_size); + dst_end = vm_map_round_page(dst_addr + dst_size, + VM_MAP_PAGE_MASK(dst_map)); vm_map_lock(dst_map); start_pass_1: if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } - vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(dst_addr)); - assert(!tmp_entry->use_pmap); /* clipping did unnest if needed */ + vm_map_clip_start(dst_map, + tmp_entry, + vm_map_trunc_page(dst_addr, + VM_MAP_PAGE_MASK(dst_map))); + if (tmp_entry->is_sub_map) { + /* clipping did unnest if needed */ + assert(!tmp_entry->use_pmap); + } for (entry = tmp_entry;;) { - vm_map_entry_t next; + vm_map_entry_t next; next = entry->vme_next; - while(entry->is_sub_map) { - vm_map_offset_t sub_start; - vm_map_offset_t sub_end; - vm_map_offset_t local_end; + while (entry->is_sub_map) { + vm_map_offset_t sub_start; + vm_map_offset_t sub_end; + vm_map_offset_t local_end; if (entry->in_transition) { /* * Say that we are waiting, and wait for entry. 
*/ - entry->needs_wakeup = TRUE; - vm_map_entry_wait(dst_map, THREAD_UNINT); + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); goto start_pass_1; } encountered_sub_map = TRUE; - sub_start = entry->offset; + sub_start = VME_OFFSET(entry); - if(entry->vme_end < dst_end) + if (entry->vme_end < dst_end) { sub_end = entry->vme_end; - else + } else { sub_end = dst_end; + } sub_end -= entry->vme_start; - sub_end += entry->offset; + sub_end += VME_OFFSET(entry); local_end = entry->vme_end; vm_map_unlock(dst_map); - + result = vm_map_overwrite_submap_recurse( - entry->object.sub_map, + VME_SUBMAP(entry), sub_start, sub_end - sub_start); - if(result != KERN_SUCCESS) + if (result != KERN_SUCCESS) { return result; - if (dst_end <= entry->vme_end) + } + if (dst_end <= entry->vme_end) { return KERN_SUCCESS; + } vm_map_lock(dst_map); - if(!vm_map_lookup_entry(dst_map, local_end, - &tmp_entry)) { + if (!vm_map_lookup_entry(dst_map, local_end, + &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } entry = tmp_entry; next = entry->vme_next; } - if ( ! (entry->protection & VM_PROT_WRITE)) { + if (!(entry->protection & VM_PROT_WRITE)) { vm_map_unlock(dst_map); - return(KERN_PROTECTION_FAILURE); + return KERN_PROTECTION_FAILURE; } /* @@ -5355,13 +8641,12 @@ start_pass_1: * for it to exit that state. Anything could happen * when we unlock the map, so start over. */ - if (entry->in_transition) { - - /* - * Say that we are waiting, and wait for entry. - */ - entry->needs_wakeup = TRUE; - vm_map_entry_wait(dst_map, THREAD_UNINT); + if (entry->in_transition) { + /* + * Say that we are waiting, and wait for entry. + */ + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); goto start_pass_1; } @@ -5379,18 +8664,18 @@ start_pass_1: if ((next == vm_map_to_entry(dst_map)) || (next->vme_start != entry->vme_end)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } /* * Check for permanent objects in the destination. 
*/ - if ((entry->object.vm_object != VM_OBJECT_NULL) && - ((!entry->object.vm_object->internal) || - (entry->object.vm_object->true_share))) { - if(encountered_sub_map) { + if ((VME_OBJECT(entry) != VM_OBJECT_NULL) && + ((!VME_OBJECT(entry)->internal) || + (VME_OBJECT(entry)->true_share))) { + if (encountered_sub_map) { vm_map_unlock(dst_map); - return(KERN_FAILURE); + return KERN_FAILURE; } } @@ -5398,7 +8683,7 @@ start_pass_1: entry = next; }/* for */ vm_map_unlock(dst_map); - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -5454,31 +8739,32 @@ start_pass_1: static kern_return_t vm_map_copy_overwrite_nested( - vm_map_t dst_map, - vm_map_address_t dst_addr, - vm_map_copy_t copy, - boolean_t interruptible, - pmap_t pmap, - boolean_t discard_on_success) -{ - vm_map_offset_t dst_end; - vm_map_entry_t tmp_entry; - vm_map_entry_t entry; - kern_return_t kr; - boolean_t aligned = TRUE; - boolean_t contains_permanent_objects = FALSE; - boolean_t encountered_sub_map = FALSE; - vm_map_offset_t base_addr; - vm_map_size_t copy_size; - vm_map_size_t total_size; + vm_map_t dst_map, + vm_map_address_t dst_addr, + vm_map_copy_t copy, + boolean_t interruptible, + pmap_t pmap, + boolean_t discard_on_success) +{ + vm_map_offset_t dst_end; + vm_map_entry_t tmp_entry; + vm_map_entry_t entry; + kern_return_t kr; + boolean_t aligned = TRUE; + boolean_t contains_permanent_objects = FALSE; + boolean_t encountered_sub_map = FALSE; + vm_map_offset_t base_addr; + vm_map_size_t copy_size; + vm_map_size_t total_size; /* * Check for null copy object. */ - if (copy == VM_MAP_COPY_NULL) - return(KERN_SUCCESS); + if (copy == VM_MAP_COPY_NULL) { + return KERN_SUCCESS; + } /* * Check for special kernel buffer allocated @@ -5486,9 +8772,9 @@ vm_map_copy_overwrite_nested( */ if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) { - return(vm_map_copyout_kernel_buffer( - dst_map, &dst_addr, - copy, TRUE)); + return vm_map_copyout_kernel_buffer( + dst_map, &dst_addr, + copy, copy->size, TRUE, discard_on_success); } /* @@ -5499,9 +8785,10 @@ vm_map_copy_overwrite_nested( assert(copy->type == VM_MAP_COPY_ENTRY_LIST); if (copy->size == 0) { - if (discard_on_success) + if (discard_on_success) { vm_map_copy_discard(copy); - return(KERN_SUCCESS); + } + return KERN_SUCCESS; } /* @@ -5511,12 +8798,15 @@ vm_map_copy_overwrite_nested( * splitting entries in strange ways. 
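/*
 * "Page aligned" in the checks that follow means aligned to the
 * destination map's own page mask, which may be larger than the
 * hardware page.  A minimal sketch of that predicate and of the
 * rounded end used on the unaligned path, with made-up example values:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* page_mask is page_size - 1, e.g. 0xFFF for 4K or 0x3FFF for 16K */
static bool
map_aligned(uint64_t value, uint64_t page_mask)
{
	return (value & page_mask) == 0;
}

static uint64_t
map_round_up(uint64_t value, uint64_t page_mask)
{
	return (value + page_mask) & ~page_mask;
}

int
main(void)
{
	uint64_t page_mask = 0x3FFF;            /* 16K pages */
	uint64_t dst_addr  = 0x100200;          /* not 16K aligned */
	uint64_t size      = 0x8000;

	bool aligned = map_aligned(dst_addr, page_mask) &&
	    map_aligned(size, page_mask);

	/* the unaligned path still needs a page-rounded end for range checks */
	uint64_t dst_end = aligned ? dst_addr + size
	    : map_round_up(dst_addr + size, page_mask);

	printf("aligned=%d dst_end=0x%llx\n", aligned,
	    (unsigned long long)dst_end);
	return 0;
}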
*/ - if (!page_aligned(copy->size) || - !page_aligned (copy->offset) || - !page_aligned (dst_addr)) - { + if (!VM_MAP_PAGE_ALIGNED(copy->size, + VM_MAP_PAGE_MASK(dst_map)) || + !VM_MAP_PAGE_ALIGNED(copy->offset, + VM_MAP_PAGE_MASK(dst_map)) || + !VM_MAP_PAGE_ALIGNED(dst_addr, + VM_MAP_PAGE_MASK(dst_map))) { aligned = FALSE; - dst_end = vm_map_round_page(dst_addr + copy->size); + dst_end = vm_map_round_page(dst_addr + copy->size, + VM_MAP_PAGE_MASK(dst_map)); } else { dst_end = dst_addr + copy->size; } @@ -5529,73 +8819,78 @@ vm_map_copy_overwrite_nested( */ if (dst_addr >= dst_map->max_offset) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } - + start_pass_1: if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } - vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(dst_addr)); + vm_map_clip_start(dst_map, + tmp_entry, + vm_map_trunc_page(dst_addr, + VM_MAP_PAGE_MASK(dst_map))); for (entry = tmp_entry;;) { - vm_map_entry_t next = entry->vme_next; - - while(entry->is_sub_map) { - vm_map_offset_t sub_start; - vm_map_offset_t sub_end; - vm_map_offset_t local_end; + vm_map_entry_t next = entry->vme_next; - if (entry->in_transition) { + while (entry->is_sub_map) { + vm_map_offset_t sub_start; + vm_map_offset_t sub_end; + vm_map_offset_t local_end; + if (entry->in_transition) { /* * Say that we are waiting, and wait for entry. */ - entry->needs_wakeup = TRUE; - vm_map_entry_wait(dst_map, THREAD_UNINT); + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); goto start_pass_1; } local_end = entry->vme_end; - if (!(entry->needs_copy)) { + if (!(entry->needs_copy)) { /* if needs_copy we are a COW submap */ /* in such a case we just replace so */ /* there is no need for the follow- */ /* ing check. */ encountered_sub_map = TRUE; - sub_start = entry->offset; + sub_start = VME_OFFSET(entry); - if(entry->vme_end < dst_end) + if (entry->vme_end < dst_end) { sub_end = entry->vme_end; - else + } else { sub_end = dst_end; + } sub_end -= entry->vme_start; - sub_end += entry->offset; + sub_end += VME_OFFSET(entry); vm_map_unlock(dst_map); - + kr = vm_map_overwrite_submap_recurse( - entry->object.sub_map, + VME_SUBMAP(entry), sub_start, sub_end - sub_start); - if(kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } vm_map_lock(dst_map); } - if (dst_end <= entry->vme_end) + if (dst_end <= entry->vme_end) { goto start_overwrite; - if(!vm_map_lookup_entry(dst_map, local_end, - &entry)) { + } + if (!vm_map_lookup_entry(dst_map, local_end, + &entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } next = entry->vme_next; } - if ( ! (entry->protection & VM_PROT_WRITE)) { + if (!(entry->protection & VM_PROT_WRITE)) { vm_map_unlock(dst_map); - return(KERN_PROTECTION_FAILURE); + return KERN_PROTECTION_FAILURE; } /* @@ -5603,13 +8898,12 @@ start_pass_1: * for it to exit that state. Anything could happen * when we unlock the map, so start over. */ - if (entry->in_transition) { - - /* - * Say that we are waiting, and wait for entry. - */ - entry->needs_wakeup = TRUE; - vm_map_entry_wait(dst_map, THREAD_UNINT); + if (entry->in_transition) { + /* + * Say that we are waiting, and wait for entry. 
+ */ + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); goto start_pass_1; } @@ -5617,24 +8911,25 @@ start_pass_1: /* * our range is contained completely within this map entry */ - if (dst_end <= entry->vme_end) + if (dst_end <= entry->vme_end) { break; + } /* * check that range specified is contiguous region */ if ((next == vm_map_to_entry(dst_map)) || (next->vme_start != entry->vme_end)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } /* * Check for permanent objects in the destination. */ - if ((entry->object.vm_object != VM_OBJECT_NULL) && - ((!entry->object.vm_object->internal) || - (entry->object.vm_object->true_share))) { + if ((VME_OBJECT(entry) != VM_OBJECT_NULL) && + ((!VME_OBJECT(entry)->internal) || + (VME_OBJECT(entry)->true_share))) { contains_permanent_objects = TRUE; } @@ -5649,11 +8944,11 @@ start_overwrite: if (interruptible && contains_permanent_objects) { vm_map_unlock(dst_map); - return(KERN_FAILURE); /* XXX */ + return KERN_FAILURE; /* XXX */ } /* - * + * * Make a second pass, overwriting the data * At the beginning of each loop iteration, * the next entry to be overwritten is "tmp_entry" @@ -5663,31 +8958,31 @@ start_overwrite: */ total_size = copy->size; - if(encountered_sub_map) { + if (encountered_sub_map) { copy_size = 0; /* re-calculate tmp_entry since we've had the map */ /* unlocked */ if (!vm_map_lookup_entry( dst_map, dst_addr, &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } } else { copy_size = copy->size; } - + base_addr = dst_addr; - while(TRUE) { + while (TRUE) { /* deconstruct the copy object and do in parts */ /* only in sub_map, interruptable case */ - vm_map_entry_t copy_entry; - vm_map_entry_t previous_prev = VM_MAP_ENTRY_NULL; - vm_map_entry_t next_copy = VM_MAP_ENTRY_NULL; - int nentries; - int remaining_entries = 0; - vm_map_offset_t new_offset = 0; - + vm_map_entry_t copy_entry; + vm_map_entry_t previous_prev = VM_MAP_ENTRY_NULL; + vm_map_entry_t next_copy = VM_MAP_ENTRY_NULL; + int nentries; + int remaining_entries = 0; + vm_map_offset_t new_offset = 0; + for (entry = tmp_entry; copy_size == 0;) { - vm_map_entry_t next; + vm_map_entry_t next; next = entry->vme_next; @@ -5700,121 +8995,119 @@ start_overwrite: /* encounter of a submap as dictated by base_addr */ /* we will zero copy_size accordingly. */ if (entry->in_transition) { - /* - * Say that we are waiting, and wait for entry. - */ - entry->needs_wakeup = TRUE; - vm_map_entry_wait(dst_map, THREAD_UNINT); - - if(!vm_map_lookup_entry(dst_map, base_addr, - &tmp_entry)) { + /* + * Say that we are waiting, and wait for entry. 
+ */ + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); + + if (!vm_map_lookup_entry(dst_map, base_addr, + &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } copy_size = 0; entry = tmp_entry; continue; } - if(entry->is_sub_map) { - vm_map_offset_t sub_start; - vm_map_offset_t sub_end; - vm_map_offset_t local_end; + if (entry->is_sub_map) { + vm_map_offset_t sub_start; + vm_map_offset_t sub_end; + vm_map_offset_t local_end; - if (entry->needs_copy) { + if (entry->needs_copy) { /* if this is a COW submap */ /* just back the range with a */ /* anonymous entry */ - if(entry->vme_end < dst_end) + if (entry->vme_end < dst_end) { sub_end = entry->vme_end; - else + } else { sub_end = dst_end; - if(entry->vme_start < base_addr) + } + if (entry->vme_start < base_addr) { sub_start = base_addr; - else + } else { sub_start = entry->vme_start; + } vm_map_clip_end( dst_map, entry, sub_end); vm_map_clip_start( dst_map, entry, sub_start); assert(!entry->use_pmap); + assert(!entry->iokit_acct); + entry->use_pmap = TRUE; entry->is_sub_map = FALSE; vm_map_deallocate( - entry->object.sub_map); - entry->object.sub_map = NULL; + VME_SUBMAP(entry)); + VME_OBJECT_SET(entry, VM_OBJECT_NULL); + VME_OFFSET_SET(entry, 0); entry->is_shared = FALSE; entry->needs_copy = FALSE; - entry->offset = 0; - /* - * XXX FBDP - * We should propagate the protections - * of the submap entry here instead - * of forcing them to VM_PROT_ALL... - * Or better yet, we should inherit - * the protection of the copy_entry. - */ - entry->protection = VM_PROT_ALL; + entry->protection = VM_PROT_DEFAULT; entry->max_protection = VM_PROT_ALL; entry->wired_count = 0; entry->user_wired_count = 0; - if(entry->inheritance - == VM_INHERIT_SHARE) + if (entry->inheritance + == VM_INHERIT_SHARE) { entry->inheritance = VM_INHERIT_COPY; + } continue; } /* first take care of any non-sub_map */ /* entries to send */ - if(base_addr < entry->vme_start) { + if (base_addr < entry->vme_start) { /* stuff to send */ - copy_size = - entry->vme_start - base_addr; + copy_size = + entry->vme_start - base_addr; break; } - sub_start = entry->offset; + sub_start = VME_OFFSET(entry); - if(entry->vme_end < dst_end) + if (entry->vme_end < dst_end) { sub_end = entry->vme_end; - else + } else { sub_end = dst_end; + } sub_end -= entry->vme_start; - sub_end += entry->offset; + sub_end += VME_OFFSET(entry); local_end = entry->vme_end; vm_map_unlock(dst_map); copy_size = sub_end - sub_start; /* adjust the copy object */ if (total_size > copy_size) { - vm_map_size_t local_size = 0; - vm_map_size_t entry_size; + vm_map_size_t local_size = 0; + vm_map_size_t entry_size; nentries = 1; new_offset = copy->offset; copy_entry = vm_map_copy_first_entry(copy); - while(copy_entry != - vm_map_copy_to_entry(copy)){ - entry_size = copy_entry->vme_end - - copy_entry->vme_start; - if((local_size < copy_size) && - ((local_size + entry_size) + while (copy_entry != + vm_map_copy_to_entry(copy)) { + entry_size = copy_entry->vme_end - + copy_entry->vme_start; + if ((local_size < copy_size) && + ((local_size + entry_size) >= copy_size)) { - vm_map_copy_clip_end(copy, - copy_entry, - copy_entry->vme_start + - (copy_size - local_size)); - entry_size = copy_entry->vme_end - - copy_entry->vme_start; + vm_map_copy_clip_end(copy, + copy_entry, + copy_entry->vme_start + + (copy_size - local_size)); + entry_size = copy_entry->vme_end - + copy_entry->vme_start; local_size += entry_size; new_offset += entry_size; } - if(local_size >= 
copy_size) { + if (local_size >= copy_size) { next_copy = copy_entry->vme_next; - copy_entry->vme_next = - vm_map_copy_to_entry(copy); - previous_prev = - copy->cpy_hdr.links.prev; + copy_entry->vme_next = + vm_map_copy_to_entry(copy); + previous_prev = + copy->cpy_hdr.links.prev; copy->cpy_hdr.links.prev = copy_entry; copy->size = copy_size; - remaining_entries = - copy->cpy_hdr.nentries; + remaining_entries = + copy->cpy_hdr.nentries; remaining_entries -= nentries; copy->cpy_hdr.nentries = nentries; break; @@ -5826,78 +9119,80 @@ start_overwrite: copy_entry = copy_entry->vme_next; } } - - if((entry->use_pmap) && (pmap == NULL)) { + + if ((entry->use_pmap) && (pmap == NULL)) { kr = vm_map_copy_overwrite_nested( - entry->object.sub_map, + VME_SUBMAP(entry), sub_start, copy, - interruptible, - entry->object.sub_map->pmap, + interruptible, + VME_SUBMAP(entry)->pmap, TRUE); } else if (pmap != NULL) { kr = vm_map_copy_overwrite_nested( - entry->object.sub_map, + VME_SUBMAP(entry), sub_start, copy, interruptible, pmap, TRUE); } else { kr = vm_map_copy_overwrite_nested( - entry->object.sub_map, + VME_SUBMAP(entry), sub_start, copy, interruptible, dst_map->pmap, TRUE); } - if(kr != KERN_SUCCESS) { - if(next_copy != NULL) { - copy->cpy_hdr.nentries += - remaining_entries; - copy->cpy_hdr.links.prev->vme_next = - next_copy; - copy->cpy_hdr.links.prev - = previous_prev; + if (kr != KERN_SUCCESS) { + if (next_copy != NULL) { + copy->cpy_hdr.nentries += + remaining_entries; + copy->cpy_hdr.links.prev->vme_next = + next_copy; + copy->cpy_hdr.links.prev + = previous_prev; copy->size = total_size; } return kr; } if (dst_end <= local_end) { - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* otherwise copy no longer exists, it was */ /* destroyed after successful copy_overwrite */ - copy = (vm_map_copy_t) - zalloc(vm_map_copy_zone); - vm_map_copy_first_entry(copy) = - vm_map_copy_last_entry(copy) = - vm_map_copy_to_entry(copy); + copy = vm_map_copy_allocate(); copy->type = VM_MAP_COPY_ENTRY_LIST; copy->offset = new_offset; + /* + * XXX FBDP + * this does not seem to deal with + * the VM map store (R&B tree) + */ + total_size -= copy_size; copy_size = 0; /* put back remainder of copy in container */ - if(next_copy != NULL) { + if (next_copy != NULL) { copy->cpy_hdr.nentries = remaining_entries; copy->cpy_hdr.links.next = next_copy; copy->cpy_hdr.links.prev = previous_prev; copy->size = total_size; - next_copy->vme_prev = - vm_map_copy_to_entry(copy); + next_copy->vme_prev = + vm_map_copy_to_entry(copy); next_copy = NULL; } base_addr = local_end; vm_map_lock(dst_map); - if(!vm_map_lookup_entry(dst_map, - local_end, &tmp_entry)) { + if (!vm_map_lookup_entry(dst_map, + local_end, &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } entry = tmp_entry; continue; - } + } if (dst_end <= entry->vme_end) { copy_size = dst_end - base_addr; break; @@ -5906,7 +9201,7 @@ start_overwrite: if ((next == vm_map_to_entry(dst_map)) || (next->vme_start != entry->vme_end)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } entry = next; @@ -5917,35 +9212,35 @@ start_overwrite: /* adjust the copy object */ if (total_size > copy_size) { - vm_map_size_t local_size = 0; - vm_map_size_t entry_size; + vm_map_size_t local_size = 0; + vm_map_size_t entry_size; new_offset = copy->offset; copy_entry = vm_map_copy_first_entry(copy); - while(copy_entry != vm_map_copy_to_entry(copy)) { - entry_size = copy_entry->vme_end - - copy_entry->vme_start; - 
if((local_size < copy_size) && - ((local_size + entry_size) + while (copy_entry != vm_map_copy_to_entry(copy)) { + entry_size = copy_entry->vme_end - + copy_entry->vme_start; + if ((local_size < copy_size) && + ((local_size + entry_size) >= copy_size)) { - vm_map_copy_clip_end(copy, copy_entry, - copy_entry->vme_start + - (copy_size - local_size)); - entry_size = copy_entry->vme_end - - copy_entry->vme_start; + vm_map_copy_clip_end(copy, copy_entry, + copy_entry->vme_start + + (copy_size - local_size)); + entry_size = copy_entry->vme_end - + copy_entry->vme_start; local_size += entry_size; new_offset += entry_size; } - if(local_size >= copy_size) { + if (local_size >= copy_size) { next_copy = copy_entry->vme_next; - copy_entry->vme_next = - vm_map_copy_to_entry(copy); - previous_prev = - copy->cpy_hdr.links.prev; + copy_entry->vme_next = + vm_map_copy_to_entry(copy); + previous_prev = + copy->cpy_hdr.links.prev; copy->cpy_hdr.links.prev = copy_entry; copy->size = copy_size; - remaining_entries = - copy->cpy_hdr.nentries; + remaining_entries = + copy->cpy_hdr.nentries; remaining_entries -= nentries; copy->cpy_hdr.nentries = nentries; break; @@ -5959,23 +9254,24 @@ start_overwrite: } if (aligned) { - pmap_t local_pmap; + pmap_t local_pmap; - if(pmap) + if (pmap) { local_pmap = pmap; - else + } else { local_pmap = dst_map->pmap; + } - if ((kr = vm_map_copy_overwrite_aligned( - dst_map, tmp_entry, copy, - base_addr, local_pmap)) != KERN_SUCCESS) { - if(next_copy != NULL) { - copy->cpy_hdr.nentries += - remaining_entries; - copy->cpy_hdr.links.prev->vme_next = - next_copy; - copy->cpy_hdr.links.prev = - previous_prev; + if ((kr = vm_map_copy_overwrite_aligned( + dst_map, tmp_entry, copy, + base_addr, local_pmap)) != KERN_SUCCESS) { + if (next_copy != NULL) { + copy->cpy_hdr.nentries += + remaining_entries; + copy->cpy_hdr.links.prev->vme_next = + next_copy; + copy->cpy_hdr.links.prev = + previous_prev; copy->size += copy_size; } return kr; @@ -5993,27 +9289,33 @@ start_overwrite: * bits of the region in this case ! 
*/ /* ALWAYS UNLOCKS THE dst_map MAP */ - if ((kr = vm_map_copy_overwrite_unaligned( dst_map, - tmp_entry, copy, base_addr)) != KERN_SUCCESS) { - if(next_copy != NULL) { + kr = vm_map_copy_overwrite_unaligned( + dst_map, + tmp_entry, + copy, + base_addr, + discard_on_success); + if (kr != KERN_SUCCESS) { + if (next_copy != NULL) { copy->cpy_hdr.nentries += - remaining_entries; - copy->cpy_hdr.links.prev->vme_next = - next_copy; - copy->cpy_hdr.links.prev = - previous_prev; + remaining_entries; + copy->cpy_hdr.links.prev->vme_next = + next_copy; + copy->cpy_hdr.links.prev = + previous_prev; copy->size += copy_size; } return kr; } } total_size -= copy_size; - if(total_size == 0) + if (total_size == 0) { break; + } base_addr += copy_size; copy_size = 0; copy->offset = new_offset; - if(next_copy != NULL) { + if (next_copy != NULL) { copy->cpy_hdr.nentries = remaining_entries; copy->cpy_hdr.links.next = next_copy; copy->cpy_hdr.links.prev = previous_prev; @@ -6021,20 +9323,23 @@ start_overwrite: copy->size = total_size; } vm_map_lock(dst_map); - while(TRUE) { - if (!vm_map_lookup_entry(dst_map, - base_addr, &tmp_entry)) { + while (TRUE) { + if (!vm_map_lookup_entry(dst_map, + base_addr, &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } - if (tmp_entry->in_transition) { - entry->needs_wakeup = TRUE; - vm_map_entry_wait(dst_map, THREAD_UNINT); + if (tmp_entry->in_transition) { + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); } else { break; } } - vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(base_addr)); + vm_map_clip_start(dst_map, + tmp_entry, + vm_map_trunc_page(base_addr, + VM_MAP_PAGE_MASK(dst_map))); entry = tmp_entry; } /* while */ @@ -6042,24 +9347,26 @@ start_overwrite: /* * Throw away the vm_map_copy object */ - if (discard_on_success) + if (discard_on_success) { vm_map_copy_discard(copy); + } - return(KERN_SUCCESS); + return KERN_SUCCESS; }/* vm_map_copy_overwrite */ kern_return_t vm_map_copy_overwrite( - vm_map_t dst_map, - vm_map_offset_t dst_addr, - vm_map_copy_t copy, - boolean_t interruptible) + vm_map_t dst_map, + vm_map_offset_t dst_addr, + vm_map_copy_t copy, + boolean_t interruptible) { - vm_map_size_t head_size, tail_size; - vm_map_copy_t head_copy, tail_copy; - vm_map_offset_t head_addr, tail_addr; - vm_map_entry_t entry; - kern_return_t kr; + vm_map_size_t head_size, tail_size; + vm_map_copy_t head_copy, tail_copy; + vm_map_offset_t head_addr, tail_addr; + vm_map_entry_t entry; + kern_return_t kr; + vm_map_offset_t effective_page_mask, effective_page_size; head_size = 0; tail_size = 0; @@ -6075,23 +9382,29 @@ vm_map_copy_overwrite( * We can't split the "copy" map if we're interruptible * or if we don't have a "copy" map... */ - blunt_copy: +blunt_copy: return vm_map_copy_overwrite_nested(dst_map, - dst_addr, - copy, - interruptible, - (pmap_t) NULL, - TRUE); + dst_addr, + copy, + interruptible, + (pmap_t) NULL, + TRUE); } - if (copy->size < 3 * PAGE_SIZE) { + effective_page_mask = MAX(VM_MAP_PAGE_MASK(dst_map), PAGE_MASK); + effective_page_mask = MAX(VM_MAP_COPY_PAGE_MASK(copy), + effective_page_mask); + effective_page_size = effective_page_mask + 1; + + if (copy->size < 3 * effective_page_size) { /* * Too small to bother with optimizing... */ goto blunt_copy; } - if ((dst_addr & PAGE_MASK) != (copy->offset & PAGE_MASK)) { + if ((dst_addr & effective_page_mask) != + (copy->offset & effective_page_mask)) { /* * Incompatible mis-alignment of source and destination... 
*/ @@ -6103,19 +9416,26 @@ vm_map_copy_overwrite( * Let's try and do a small unaligned copy first (if needed) * and then an aligned copy for the rest. */ - if (!page_aligned(dst_addr)) { + if (!vm_map_page_aligned(dst_addr, effective_page_mask)) { head_addr = dst_addr; - head_size = PAGE_SIZE - (copy->offset & PAGE_MASK); + head_size = (effective_page_size - + (copy->offset & effective_page_mask)); + head_size = MIN(head_size, copy->size); } - if (!page_aligned(copy->offset + copy->size)) { + if (!vm_map_page_aligned(copy->offset + copy->size, + effective_page_mask)) { /* * Mis-alignment at the end. * Do an aligned copy up to the last page and * then an unaligned copy for the remaining bytes. */ - tail_size = (copy->offset + copy->size) & PAGE_MASK; + tail_size = ((copy->offset + copy->size) & + effective_page_mask); + tail_size = MIN(tail_size, copy->size); tail_addr = dst_addr + copy->size - tail_size; + assert(tail_addr >= head_addr + head_size); } + assert(head_size + tail_size <= copy->size); if (head_size + tail_size == copy->size) { /* @@ -6131,14 +9451,14 @@ vm_map_copy_overwrite( * in that case. */ vm_map_lock_read(dst_map); - if (! vm_map_lookup_entry(dst_map, dst_addr, &entry)) { + if (!vm_map_lookup_entry(dst_map, dst_addr, &entry)) { vm_map_unlock_read(dst_map); goto blunt_copy; } for (; - (entry != vm_map_copy_to_entry(copy) && - entry->vme_start < dst_addr + copy->size); - entry = entry->vme_next) { + (entry != vm_map_copy_to_entry(copy) && + entry->vme_start < dst_addr + copy->size); + entry = entry->vme_next) { if (entry->is_sub_map) { vm_map_unlock_read(dst_map); goto blunt_copy; @@ -6151,60 +9471,54 @@ vm_map_copy_overwrite( * Unaligned copy of the first "head_size" bytes, to reach * a page boundary. */ - + /* * Extract "head_copy" out of "copy". */ - head_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone); - vm_map_copy_first_entry(head_copy) = - vm_map_copy_to_entry(head_copy); - vm_map_copy_last_entry(head_copy) = - vm_map_copy_to_entry(head_copy); + head_copy = vm_map_copy_allocate(); head_copy->type = VM_MAP_COPY_ENTRY_LIST; - head_copy->cpy_hdr.nentries = 0; head_copy->cpy_hdr.entries_pageable = - copy->cpy_hdr.entries_pageable; + copy->cpy_hdr.entries_pageable; vm_map_store_init(&head_copy->cpy_hdr); + entry = vm_map_copy_first_entry(copy); + if (entry->vme_end < copy->offset + head_size) { + head_size = entry->vme_end - copy->offset; + } + head_copy->offset = copy->offset; head_copy->size = head_size; - copy->offset += head_size; copy->size -= head_size; - entry = vm_map_copy_first_entry(copy); vm_map_copy_clip_end(copy, entry, copy->offset); vm_map_copy_entry_unlink(copy, entry); vm_map_copy_entry_link(head_copy, - vm_map_copy_to_entry(head_copy), - entry); + vm_map_copy_to_entry(head_copy), + entry); /* * Do the unaligned copy. */ kr = vm_map_copy_overwrite_nested(dst_map, - head_addr, - head_copy, - interruptible, - (pmap_t) NULL, - FALSE); - if (kr != KERN_SUCCESS) + head_addr, + head_copy, + interruptible, + (pmap_t) NULL, + FALSE); + if (kr != KERN_SUCCESS) { goto done; + } } if (tail_size) { /* * Extract "tail_copy" out of "copy". 
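/*
 * The head/tail optimization above only applies when source and
 * destination share the same misalignment within a page; the copy is
 * then split into an unaligned head up to the next page boundary, a
 * page-aligned middle, and an unaligned tail.  A stand-alone sketch of
 * that arithmetic (hypothetical helper, example sizes):
 */
#include <stdint.h>
#include <stdio.h>

struct split { uint64_t head, middle, tail; };

/*
 * Split a copy of "size" bytes whose source data begins at "offset"
 * into an unaligned head (up to the next page boundary), a
 * page-aligned middle, and an unaligned tail.
 */
static struct split
split_copy(uint64_t offset, uint64_t size, uint64_t page_mask)
{
	uint64_t page_size = page_mask + 1;
	struct split s = { 0, 0, 0 };

	if (offset & page_mask) {
		s.head = page_size - (offset & page_mask);
		if (s.head > size) {
			s.head = size;
		}
	}
	if ((offset + size) & page_mask) {
		s.tail = (offset + size) & page_mask;
		if (s.tail > size - s.head) {
			s.tail = size - s.head;
		}
	}
	s.middle = size - s.head - s.tail;
	return s;
}

int
main(void)
{
	/* e.g. 0x9200 bytes starting 0x200 into a 16K page */
	struct split s = split_copy(0x200, 0x9200, 0x3FFF);

	printf("head=0x%llx middle=0x%llx tail=0x%llx\n",
	    (unsigned long long)s.head, (unsigned long long)s.middle,
	    (unsigned long long)s.tail);
	return 0;
}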
*/ - tail_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone); - vm_map_copy_first_entry(tail_copy) = - vm_map_copy_to_entry(tail_copy); - vm_map_copy_last_entry(tail_copy) = - vm_map_copy_to_entry(tail_copy); + tail_copy = vm_map_copy_allocate(); tail_copy->type = VM_MAP_COPY_ENTRY_LIST; - tail_copy->cpy_hdr.nentries = 0; tail_copy->cpy_hdr.entries_pageable = - copy->cpy_hdr.entries_pageable; + copy->cpy_hdr.entries_pageable; vm_map_store_init(&tail_copy->cpy_hdr); tail_copy->offset = copy->offset + copy->size - tail_size; @@ -6217,30 +9531,30 @@ vm_map_copy_overwrite( entry = vm_map_copy_last_entry(copy); vm_map_copy_entry_unlink(copy, entry); vm_map_copy_entry_link(tail_copy, - vm_map_copy_last_entry(tail_copy), - entry); + vm_map_copy_last_entry(tail_copy), + entry); } /* * Copy most (or possibly all) of the data. */ kr = vm_map_copy_overwrite_nested(dst_map, - dst_addr + head_size, - copy, - interruptible, - (pmap_t) NULL, - FALSE); + dst_addr + head_size, + copy, + interruptible, + (pmap_t) NULL, + FALSE); if (kr != KERN_SUCCESS) { goto done; } if (tail_size) { kr = vm_map_copy_overwrite_nested(dst_map, - tail_addr, - tail_copy, - interruptible, - (pmap_t) NULL, - FALSE); + tail_addr, + tail_copy, + interruptible, + (pmap_t) NULL, + FALSE); } done: @@ -6266,8 +9580,8 @@ done: entry = vm_map_copy_first_entry(head_copy); vm_map_copy_entry_unlink(head_copy, entry); vm_map_copy_entry_link(copy, - vm_map_copy_to_entry(copy), - entry); + vm_map_copy_to_entry(copy), + entry); copy->offset -= head_size; copy->size += head_size; vm_map_copy_discard(head_copy); @@ -6277,8 +9591,8 @@ done: entry = vm_map_copy_last_entry(tail_copy); vm_map_copy_entry_unlink(tail_copy, entry); vm_map_copy_entry_link(copy, - vm_map_copy_last_entry(copy), - entry); + vm_map_copy_last_entry(copy), + entry); copy->size += tail_size; vm_map_copy_discard(tail_copy); tail_copy = NULL; @@ -6313,23 +9627,28 @@ done: static kern_return_t vm_map_copy_overwrite_unaligned( - vm_map_t dst_map, - vm_map_entry_t entry, - vm_map_copy_t copy, - vm_map_offset_t start) -{ - vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy); - vm_map_version_t version; - vm_object_t dst_object; - vm_object_offset_t dst_offset; - vm_object_offset_t src_offset; - vm_object_offset_t entry_offset; - vm_map_offset_t entry_end; - vm_map_size_t src_size, - dst_size, - copy_size, - amount_left; - kern_return_t kr = KERN_SUCCESS; + vm_map_t dst_map, + vm_map_entry_t entry, + vm_map_copy_t copy, + vm_map_offset_t start, + boolean_t discard_on_success) +{ + vm_map_entry_t copy_entry; + vm_map_entry_t copy_entry_next; + vm_map_version_t version; + vm_object_t dst_object; + vm_object_offset_t dst_offset; + vm_object_offset_t src_offset; + vm_object_offset_t entry_offset; + vm_map_offset_t entry_end; + vm_map_size_t src_size, + dst_size, + copy_size, + amount_left; + kern_return_t kr = KERN_SUCCESS; + + + copy_entry = vm_map_copy_first_entry(copy); vm_map_lock_write_to_read(dst_map); @@ -6338,23 +9657,22 @@ vm_map_copy_overwrite_unaligned( /* * unaligned so we never clipped this entry, we need the offset into * the vm_object not just the data. 
- */ + */ while (amount_left > 0) { - if (entry == vm_map_to_entry(dst_map)) { vm_map_unlock_read(dst_map); return KERN_INVALID_ADDRESS; } /* "start" must be within the current map entry */ - assert ((start>=entry->vme_start) && (startvme_end)); + assert((start >= entry->vme_start) && (start < entry->vme_end)); dst_offset = start - entry->vme_start; dst_size = entry->vme_end - start; src_size = copy_entry->vme_end - - (copy_entry->vme_start + src_offset); + (copy_entry->vme_start + src_offset); if (dst_size < src_size) { /* @@ -6378,33 +9696,32 @@ vm_map_copy_overwrite_unaligned( * Copy on write region. */ if (entry->needs_copy && - ((entry->protection & VM_PROT_WRITE) != 0)) - { + ((entry->protection & VM_PROT_WRITE) != 0)) { if (vm_map_lock_read_to_write(dst_map)) { vm_map_lock_read(dst_map); goto RetryLookup; } - vm_object_shadow(&entry->object.vm_object, - &entry->offset, - (vm_map_size_t)(entry->vme_end - - entry->vme_start)); + VME_OBJECT_SHADOW(entry, + (vm_map_size_t)(entry->vme_end + - entry->vme_start)); entry->needs_copy = FALSE; vm_map_lock_write_to_read(dst_map); } - dst_object = entry->object.vm_object; + dst_object = VME_OBJECT(entry); /* * unlike with the virtual (aligned) copy we're going * to fault on it therefore we need a target object. */ - if (dst_object == VM_OBJECT_NULL) { + if (dst_object == VM_OBJECT_NULL) { if (vm_map_lock_read_to_write(dst_map)) { vm_map_lock_read(dst_map); goto RetryLookup; } dst_object = vm_object_allocate((vm_map_size_t) - entry->vme_end - entry->vme_start); - entry->object.vm_object = dst_object; - entry->offset = 0; + entry->vme_end - entry->vme_start); + VME_OBJECT_SET(entry, dst_object); + VME_OFFSET_SET(entry, 0); + assert(entry->use_pmap); vm_map_lock_write_to_read(dst_map); } /* @@ -6413,15 +9730,15 @@ vm_map_copy_overwrite_unaligned( */ vm_object_reference(dst_object); version.main_timestamp = dst_map->timestamp; - entry_offset = entry->offset; + entry_offset = VME_OFFSET(entry); entry_end = entry->vme_end; vm_map_unlock_read(dst_map); /* * Copy as much as possible in one pass */ kr = vm_fault_copy( - copy_entry->object.vm_object, - copy_entry->offset + src_offset, + VME_OBJECT(copy_entry), + VME_OFFSET(copy_entry) + src_offset, ©_size, dst_object, entry_offset + dst_offset, @@ -6439,31 +9756,40 @@ vm_map_copy_overwrite_unaligned( /* * If a hard error occurred, return it now */ - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } if ((copy_entry->vme_start + src_offset) == copy_entry->vme_end - || amount_left == 0) - { + || amount_left == 0) { /* * all done with this copy entry, dispose. 
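/*
 * Stripped of locking, faulting and lookup retries, the unaligned
 * overwrite loop above advances through the copy entries and the
 * destination entries in lockstep, moving min(src left, dst left)
 * bytes each pass.  The user-space sketch below shows only that
 * lockstep shape; "span" and copy_spans() are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct span { uint8_t *base; size_t len; };

/*
 * Copy "total" bytes from a list of source spans into a list of
 * destination spans, advancing whichever side is exhausted first.
 */
static size_t
copy_spans(struct span *dst, int ndst, struct span *src, int nsrc, size_t total)
{
	int di = 0, si = 0;
	size_t doff = 0, soff = 0, done = 0;

	while (done < total && di < ndst && si < nsrc) {
		size_t dleft = dst[di].len - doff;
		size_t sleft = src[si].len - soff;
		size_t chunk = dleft < sleft ? dleft : sleft;

		if (chunk > total - done) {
			chunk = total - done;
		}
		memcpy(dst[di].base + doff, src[si].base + soff, chunk);
		doff += chunk;
		soff += chunk;
		done += chunk;
		if (doff == dst[di].len) { di++; doff = 0; }
		if (soff == src[si].len) { si++; soff = 0; }
	}
	return done;    /* < total means we ran out of source or destination */
}

int
main(void)
{
	uint8_t s1[] = "hello ", s2[] = "world";
	uint8_t d1[4] = { 0 }, d2[8] = { 0 };
	struct span src[] = { { s1, 6 }, { s2, 5 } };
	struct span dst[] = { { d1, sizeof(d1) }, { d2, sizeof(d2) } };

	size_t n = copy_spans(dst, 2, src, 2, 11);
	printf("copied %zu bytes: %.4s%.7s\n", n,
	    (const char *)d1, (const char *)d2);
	return 0;
}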
*/ - vm_map_copy_entry_unlink(copy, copy_entry); - vm_object_deallocate(copy_entry->object.vm_object); - vm_map_copy_entry_dispose(copy, copy_entry); + copy_entry_next = copy_entry->vme_next; + + if (discard_on_success) { + vm_map_copy_entry_unlink(copy, copy_entry); + assert(!copy_entry->is_sub_map); + vm_object_deallocate(VME_OBJECT(copy_entry)); + vm_map_copy_entry_dispose(copy, copy_entry); + } - if ((copy_entry = vm_map_copy_first_entry(copy)) - == vm_map_copy_to_entry(copy) && amount_left) { + if (copy_entry_next == vm_map_copy_to_entry(copy) && + amount_left) { /* * not finished copying but run out of source */ return KERN_INVALID_ADDRESS; } + + copy_entry = copy_entry_next; + src_offset = 0; } - if (amount_left == 0) + if (amount_left == 0) { return KERN_SUCCESS; + } vm_map_lock_read(dst_map); if (version.main_timestamp == dst_map->timestamp) { @@ -6480,7 +9806,7 @@ vm_map_copy_overwrite_unaligned( */ if (start != entry->vme_start) { vm_map_unlock_read(dst_map); - return KERN_INVALID_ADDRESS ; + return KERN_INVALID_ADDRESS; } } } else { @@ -6489,11 +9815,10 @@ vm_map_copy_overwrite_unaligned( * we must lookup the entry because somebody * might have changed the map behind our backs. */ - RetryLookup: - if (!vm_map_lookup_entry(dst_map, start, &entry)) - { +RetryLookup: + if (!vm_map_lookup_entry(dst_map, start, &entry)) { vm_map_unlock_read(dst_map); - return KERN_INVALID_ADDRESS ; + return KERN_INVALID_ADDRESS; } } }/* while */ @@ -6520,27 +9845,33 @@ vm_map_copy_overwrite_unaligned( * to the above pass and make sure that no wiring is involved. */ +int vm_map_copy_overwrite_aligned_src_not_internal = 0; +int vm_map_copy_overwrite_aligned_src_not_symmetric = 0; +int vm_map_copy_overwrite_aligned_src_large = 0; + static kern_return_t vm_map_copy_overwrite_aligned( - vm_map_t dst_map, - vm_map_entry_t tmp_entry, - vm_map_copy_t copy, - vm_map_offset_t start, - __unused pmap_t pmap) -{ - vm_object_t object; - vm_map_entry_t copy_entry; - vm_map_size_t copy_size; - vm_map_size_t size; - vm_map_entry_t entry; - + vm_map_t dst_map, + vm_map_entry_t tmp_entry, + vm_map_copy_t copy, + vm_map_offset_t start, + __unused pmap_t pmap) +{ + vm_object_t object; + vm_map_entry_t copy_entry; + vm_map_size_t copy_size; + vm_map_size_t size; + vm_map_entry_t entry; + while ((copy_entry = vm_map_copy_first_entry(copy)) - != vm_map_copy_to_entry(copy)) - { + != vm_map_copy_to_entry(copy)) { copy_size = (copy_entry->vme_end - copy_entry->vme_start); - + entry = tmp_entry; - assert(!entry->use_pmap); /* unnested when clipped earlier */ + if (entry->is_sub_map) { + /* unnested when clipped earlier */ + assert(!entry->use_pmap); + } if (entry == vm_map_to_entry(dst_map)) { vm_map_unlock(dst_map); return KERN_INVALID_ADDRESS; @@ -6554,9 +9885,9 @@ vm_map_copy_overwrite_aligned( */ if ((entry->vme_start != start) || ((entry->is_sub_map) - && !entry->needs_copy)) { + && !entry->needs_copy)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } assert(entry != vm_map_to_entry(dst_map)); @@ -6564,9 +9895,9 @@ vm_map_copy_overwrite_aligned( * Check protection again */ - if ( ! 
(entry->protection & VM_PROT_WRITE)) { + if (!(entry->protection & VM_PROT_WRITE)) { vm_map_unlock(dst_map); - return(KERN_PROTECTION_FAILURE); + return KERN_PROTECTION_FAILURE; } /* @@ -6574,6 +9905,12 @@ vm_map_copy_overwrite_aligned( */ if (copy_size < size) { + if (entry->map_aligned && + !VM_MAP_PAGE_ALIGNED(entry->vme_start + copy_size, + VM_MAP_PAGE_MASK(dst_map))) { + /* no longer map-aligned */ + entry->map_aligned = FALSE; + } vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size); size = copy_size; } @@ -6584,7 +9921,7 @@ vm_map_copy_overwrite_aligned( if (size < copy_size) { vm_map_copy_clip_end(copy, copy_entry, - copy_entry->vme_start + size); + copy_entry->vme_start + size); copy_size = size; } @@ -6598,46 +9935,72 @@ vm_map_copy_overwrite_aligned( * installing the source data. */ - object = entry->object.vm_object; - if ((!entry->is_shared && - ((object == VM_OBJECT_NULL) || - (object->internal && !object->true_share))) || + object = VME_OBJECT(entry); + if ((!entry->is_shared && + ((object == VM_OBJECT_NULL) || + (object->internal && !object->true_share))) || entry->needs_copy) { - vm_object_t old_object = entry->object.vm_object; - vm_object_offset_t old_offset = entry->offset; - vm_object_offset_t offset; + vm_object_t old_object = VME_OBJECT(entry); + vm_object_offset_t old_offset = VME_OFFSET(entry); + vm_object_offset_t offset; /* * Ensure that the source and destination aren't * identical */ - if (old_object == copy_entry->object.vm_object && - old_offset == copy_entry->offset) { + if (old_object == VME_OBJECT(copy_entry) && + old_offset == VME_OFFSET(copy_entry)) { vm_map_copy_entry_unlink(copy, copy_entry); vm_map_copy_entry_dispose(copy, copy_entry); - if (old_object != VM_OBJECT_NULL) + if (old_object != VM_OBJECT_NULL) { vm_object_deallocate(old_object); + } start = tmp_entry->vme_end; tmp_entry = tmp_entry->vme_next; continue; } - if (entry->alias >= VM_MEMORY_MALLOC && - entry->alias <= VM_MEMORY_MALLOC_LARGE_REUSED) { +#if !CONFIG_EMBEDDED +#define __TRADEOFF1_OBJ_SIZE (64 * 1024 * 1024) /* 64 MB */ +#define __TRADEOFF1_COPY_SIZE (128 * 1024) /* 128 KB */ + if (VME_OBJECT(copy_entry) != VM_OBJECT_NULL && + VME_OBJECT(copy_entry)->vo_size >= __TRADEOFF1_OBJ_SIZE && + copy_size <= __TRADEOFF1_COPY_SIZE) { + /* + * Virtual vs. Physical copy tradeoff #1. + * + * Copying only a few pages out of a large + * object: do a physical copy instead of + * a virtual copy, to avoid possibly keeping + * the entire large object alive because of + * those few copy-on-write pages. + */ + vm_map_copy_overwrite_aligned_src_large++; + goto slow_copy; + } +#endif /* !CONFIG_EMBEDDED */ + + if ((dst_map->pmap != kernel_pmap) && + (VME_ALIAS(entry) >= VM_MEMORY_MALLOC) && + (VME_ALIAS(entry) <= VM_MEMORY_MALLOC_MEDIUM)) { vm_object_t new_object, new_shadow; /* * We're about to map something over a mapping * established by malloc()... */ - new_object = copy_entry->object.vm_object; + new_object = VME_OBJECT(copy_entry); if (new_object != VM_OBJECT_NULL) { vm_object_lock_shared(new_object); } while (new_object != VM_OBJECT_NULL && - new_object->internal) { +#if !CONFIG_EMBEDDED + !new_object->true_share && + new_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && +#endif /* !CONFIG_EMBEDDED */ + new_object->internal) { new_shadow = new_object->shadow; if (new_shadow == VM_OBJECT_NULL) { break; @@ -6657,9 +10020,24 @@ vm_map_copy_overwrite_aligned( * let's go off the optimized * path... 
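/*
 * Tradeoff #1 above boils down to a simple predicate on the source
 * object's size and the amount being copied.  Sketched below with the
 * same 64 MB / 128 KB thresholds; the macro and function names are
 * hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TRADEOFF_OBJ_SIZE   (64ULL * 1024 * 1024)   /* "large" backing object */
#define TRADEOFF_COPY_SIZE  (128ULL * 1024)         /* "small" copy           */

/*
 * Prefer a physical (eager) copy over a virtual (copy-on-write) copy
 * when a small slice of a very large object is being overwritten:
 * a handful of COW pages would otherwise pin the whole object.
 */
static bool
prefer_physical_copy(uint64_t src_object_size, uint64_t copy_size)
{
	return src_object_size >= TRADEOFF_OBJ_SIZE &&
	       copy_size <= TRADEOFF_COPY_SIZE;
}

int
main(void)
{
	printf("%d\n", prefer_physical_copy(256ULL << 20, 64 << 10));  /* 1 */
	printf("%d\n", prefer_physical_copy(4ULL << 20, 64 << 10));    /* 0 */
	return 0;
}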
*/ + vm_map_copy_overwrite_aligned_src_not_internal++; + vm_object_unlock(new_object); + goto slow_copy; + } +#if !CONFIG_EMBEDDED + if (new_object->true_share || + new_object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC) { + /* + * Same if there's a "true_share" + * object in the shadow chain, or + * an object with a non-default + * (SYMMETRIC) copy strategy. + */ + vm_map_copy_overwrite_aligned_src_not_symmetric++; vm_object_unlock(new_object); goto slow_copy; } +#endif /* !CONFIG_EMBEDDED */ vm_object_unlock(new_object); } /* @@ -6671,57 +10049,67 @@ vm_map_copy_overwrite_aligned( } if (old_object != VM_OBJECT_NULL) { - if(entry->is_sub_map) { - if(entry->use_pmap) { + if (entry->is_sub_map) { + if (entry->use_pmap) { #ifndef NO_NESTED_PMAP - pmap_unnest(dst_map->pmap, - (addr64_t)entry->vme_start, - entry->vme_end - entry->vme_start); -#endif /* NO_NESTED_PMAP */ - if(dst_map->mapped) { + pmap_unnest(dst_map->pmap, + (addr64_t)entry->vme_start, + entry->vme_end - entry->vme_start); +#endif /* NO_NESTED_PMAP */ + if (dst_map->mapped_in_other_pmaps) { /* clean up parent */ /* map/maps */ vm_map_submap_pmap_clean( dst_map, entry->vme_start, entry->vme_end, - entry->object.sub_map, - entry->offset); + VME_SUBMAP(entry), + VME_OFFSET(entry)); } } else { vm_map_submap_pmap_clean( - dst_map, entry->vme_start, + dst_map, entry->vme_start, entry->vme_end, - entry->object.sub_map, - entry->offset); + VME_SUBMAP(entry), + VME_OFFSET(entry)); } - vm_map_deallocate( - entry->object.sub_map); - } else { - if(dst_map->mapped) { - vm_object_pmap_protect( - entry->object.vm_object, - entry->offset, - entry->vme_end + vm_map_deallocate(VME_SUBMAP(entry)); + } else { + if (dst_map->mapped_in_other_pmaps) { + vm_object_pmap_protect_options( + VME_OBJECT(entry), + VME_OFFSET(entry), + entry->vme_end - entry->vme_start, PMAP_NULL, entry->vme_start, - VM_PROT_NONE); + VM_PROT_NONE, + PMAP_OPTIONS_REMOVE); } else { - pmap_remove(dst_map->pmap, - (addr64_t)(entry->vme_start), - (addr64_t)(entry->vme_end)); + pmap_remove_options( + dst_map->pmap, + (addr64_t)(entry->vme_start), + (addr64_t)(entry->vme_end), + PMAP_OPTIONS_REMOVE); } vm_object_deallocate(old_object); - } + } } + if (entry->iokit_acct) { + /* keep using iokit accounting */ + entry->use_pmap = FALSE; + } else { + /* use pmap accounting */ + entry->use_pmap = TRUE; + } entry->is_sub_map = FALSE; - entry->object = copy_entry->object; - object = entry->object.vm_object; + VME_OBJECT_SET(entry, VME_OBJECT(copy_entry)); + object = VME_OBJECT(entry); entry->needs_copy = copy_entry->needs_copy; entry->wired_count = 0; entry->user_wired_count = 0; - offset = entry->offset = copy_entry->offset; + offset = VME_OFFSET(copy_entry); + VME_OFFSET_SET(entry, offset); vm_map_copy_entry_unlink(copy, copy_entry); vm_map_copy_entry_dispose(copy, copy_entry); @@ -6746,14 +10134,21 @@ vm_map_copy_overwrite_aligned( start = tmp_entry->vme_end; tmp_entry = tmp_entry->vme_next; } else { - vm_map_version_t version; - vm_object_t dst_object; - vm_object_offset_t dst_offset; - kern_return_t r; - - slow_copy: - dst_object = entry->object.vm_object; - dst_offset = entry->offset; + vm_map_version_t version; + vm_object_t dst_object; + vm_object_offset_t dst_offset; + kern_return_t r; + +slow_copy: + if (entry->needs_copy) { + VME_OBJECT_SHADOW(entry, + (entry->vme_end - + entry->vme_start)); + entry->needs_copy = FALSE; + } + + dst_object = VME_OBJECT(entry); + dst_offset = VME_OFFSET(entry); /* * Take an object reference, and record @@ -6777,9 +10172,9 @@ 
vm_map_copy_overwrite_aligned( dst_object = vm_object_allocate( entry->vme_end - entry->vme_start); dst_offset = 0; - entry->object.vm_object = dst_object; - entry->offset = dst_offset; - + VME_OBJECT_SET(entry, dst_object); + VME_OFFSET_SET(entry, dst_offset); + assert(entry->use_pmap); } vm_object_reference(dst_object); @@ -6795,8 +10190,8 @@ vm_map_copy_overwrite_aligned( copy_size = size; r = vm_fault_copy( - copy_entry->object.vm_object, - copy_entry->offset, + VME_OBJECT(copy_entry), + VME_OFFSET(copy_entry), ©_size, dst_object, dst_offset, @@ -6814,8 +10209,9 @@ vm_map_copy_overwrite_aligned( * If a hard error occurred, return it now */ - if (r != KERN_SUCCESS) - return(r); + if (r != KERN_SUCCESS) { + return r; + } if (copy_size != 0) { /* @@ -6823,9 +10219,9 @@ vm_map_copy_overwrite_aligned( */ vm_map_copy_clip_end(copy, copy_entry, - copy_entry->vme_start + copy_size); + copy_entry->vme_start + copy_size); vm_map_copy_entry_unlink(copy, copy_entry); - vm_object_deallocate(copy_entry->object.vm_object); + vm_object_deallocate(VME_OBJECT(copy_entry)); vm_map_copy_entry_dispose(copy, copy_entry); } @@ -6838,9 +10234,17 @@ vm_map_copy_overwrite_aligned( start += copy_size; vm_map_lock(dst_map); - if (version.main_timestamp == dst_map->timestamp) { + if (version.main_timestamp == dst_map->timestamp && + copy_size != 0) { /* We can safely use saved tmp_entry value */ + if (tmp_entry->map_aligned && + !VM_MAP_PAGE_ALIGNED( + start, + VM_MAP_PAGE_MASK(dst_map))) { + /* no longer map-aligned */ + tmp_entry->map_aligned = FALSE; + } vm_map_clip_end(dst_map, tmp_entry, start); tmp_entry = tmp_entry->vme_next; } else { @@ -6848,14 +10252,21 @@ vm_map_copy_overwrite_aligned( if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; + } + if (tmp_entry->map_aligned && + !VM_MAP_PAGE_ALIGNED( + start, + VM_MAP_PAGE_MASK(dst_map))) { + /* no longer map-aligned */ + tmp_entry->map_aligned = FALSE; } vm_map_clip_start(dst_map, tmp_entry, start); } } }/* while */ - return(KERN_SUCCESS); + return KERN_SUCCESS; }/* vm_map_copy_overwrite_aligned */ /* @@ -6870,45 +10281,45 @@ vm_map_copy_overwrite_aligned( */ static kern_return_t vm_map_copyin_kernel_buffer( - vm_map_t src_map, - vm_map_offset_t src_addr, - vm_map_size_t len, - boolean_t src_destroy, - vm_map_copy_t *copy_result) + vm_map_t src_map, + vm_map_offset_t src_addr, + vm_map_size_t len, + boolean_t src_destroy, + vm_map_copy_t *copy_result) { kern_return_t kr; vm_map_copy_t copy; vm_size_t kalloc_size; - if ((vm_size_t) len != len) { - /* "len" is too big and doesn't fit in a "vm_size_t" */ - return KERN_RESOURCE_SHORTAGE; + if (len > msg_ool_size_small) { + return KERN_INVALID_ARGUMENT; } - kalloc_size = (vm_size_t) (sizeof(struct vm_map_copy) + len); - assert((vm_map_size_t) kalloc_size == sizeof (struct vm_map_copy) + len); - copy = (vm_map_copy_t) kalloc(kalloc_size); + kalloc_size = (vm_size_t)(cpy_kdata_hdr_sz + len); + + copy = (vm_map_copy_t)kalloc(kalloc_size); if (copy == VM_MAP_COPY_NULL) { return KERN_RESOURCE_SHORTAGE; } copy->type = VM_MAP_COPY_KERNEL_BUFFER; copy->size = len; copy->offset = 0; - copy->cpy_kdata = (void *) (copy + 1); - copy->cpy_kalloc_size = kalloc_size; - kr = copyinmap(src_map, src_addr, copy->cpy_kdata, (vm_size_t) len); + kr = copyinmap(src_map, src_addr, copy->cpy_kdata, (vm_size_t)len); if (kr != KERN_SUCCESS) { kfree(copy, kalloc_size); return kr; } if (src_destroy) { - (void) vm_map_remove(src_map, 
vm_map_trunc_page(src_addr), - vm_map_round_page(src_addr + len), - VM_MAP_REMOVE_INTERRUPTIBLE | - VM_MAP_REMOVE_WAIT_FOR_KWIRE | - (src_map == kernel_map) ? - VM_MAP_REMOVE_KUNWIRE : 0); + (void) vm_map_remove( + src_map, + vm_map_trunc_page(src_addr, + VM_MAP_PAGE_MASK(src_map)), + vm_map_round_page(src_addr + len, + VM_MAP_PAGE_MASK(src_map)), + (VM_MAP_REMOVE_INTERRUPTIBLE | + VM_MAP_REMOVE_WAIT_FOR_KWIRE | + ((src_map == kernel_map) ? VM_MAP_REMOVE_KUNWIRE : VM_MAP_REMOVE_NO_FLAGS))); } *copy_result = copy; return KERN_SUCCESS; @@ -6928,50 +10339,68 @@ vm_map_copyin_kernel_buffer( static int vm_map_copyout_kernel_buffer_failures = 0; static kern_return_t vm_map_copyout_kernel_buffer( - vm_map_t map, - vm_map_address_t *addr, /* IN/OUT */ - vm_map_copy_t copy, - boolean_t overwrite) + vm_map_t map, + vm_map_address_t *addr, /* IN/OUT */ + vm_map_copy_t copy, + vm_map_size_t copy_size, + boolean_t overwrite, + boolean_t consume_on_success) { kern_return_t kr = KERN_SUCCESS; thread_t thread = current_thread(); - if (!overwrite) { + assert(copy->size == copy_size); + + /* + * check for corrupted vm_map_copy structure + */ + if (copy_size > msg_ool_size_small || copy->offset) { + panic("Invalid vm_map_copy_t sz:%lld, ofst:%lld", + (long long)copy->size, (long long)copy->offset); + } + if (!overwrite) { /* * Allocate space in the target map for the data */ *addr = 0; - kr = vm_map_enter(map, - addr, - vm_map_round_page(copy->size), - (vm_map_offset_t) 0, - VM_FLAGS_ANYWHERE, - VM_OBJECT_NULL, - (vm_object_offset_t) 0, - FALSE, - VM_PROT_DEFAULT, - VM_PROT_ALL, - VM_INHERIT_DEFAULT); - if (kr != KERN_SUCCESS) + kr = vm_map_enter(map, + addr, + vm_map_round_page(copy_size, + VM_MAP_PAGE_MASK(map)), + (vm_map_offset_t) 0, + VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_NONE, + VM_OBJECT_NULL, + (vm_object_offset_t) 0, + FALSE, + VM_PROT_DEFAULT, + VM_PROT_ALL, + VM_INHERIT_DEFAULT); + if (kr != KERN_SUCCESS) { return kr; + } +#if KASAN + if (map->pmap == kernel_pmap) { + kasan_notify_address(*addr, copy->size); + } +#endif } /* * Copyout the data from the kernel buffer to the target map. - */ + */ if (thread->map == map) { - /* * If the target map is the current map, just do * the copy. */ - assert((vm_size_t) copy->size == copy->size); - if (copyout(copy->cpy_kdata, *addr, (vm_size_t) copy->size)) { + assert((vm_size_t)copy_size == copy_size); + if (copyout(copy->cpy_kdata, *addr, (vm_size_t)copy_size)) { kr = KERN_INVALID_ADDRESS; } - } - else { + } else { vm_map_t oldmap; /* @@ -6982,12 +10411,12 @@ vm_map_copyout_kernel_buffer( vm_map_reference(map); oldmap = vm_map_switch(map); - assert((vm_size_t) copy->size == copy->size); - if (copyout(copy->cpy_kdata, *addr, (vm_size_t) copy->size)) { + assert((vm_size_t)copy_size == copy_size); + if (copyout(copy->cpy_kdata, *addr, (vm_size_t)copy_size)) { vm_map_copyout_kernel_buffer_failures++; kr = KERN_INVALID_ADDRESS; } - + (void) vm_map_switch(oldmap); vm_map_deallocate(map); } @@ -6998,37 +10427,165 @@ vm_map_copyout_kernel_buffer( /* * Deallocate the space we allocated in the target map. 
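/*
 * A VM_MAP_COPY_KERNEL_BUFFER copy keeps its payload inline, directly
 * after the header, which is why both the allocation and the free
 * above use "header size + data size".  The toy structure below
 * illustrates that single-allocation layout with a flexible array
 * member; it is a hypothetical model, not the real vm_map_copy.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct kbuf_copy {
	uint64_t size;          /* payload bytes that follow the header */
	uint64_t offset;
	uint8_t  kdata[];       /* payload starts right after the header */
};

#define KBUF_HDR_SIZE   offsetof(struct kbuf_copy, kdata)

static struct kbuf_copy *
kbuf_copy_create(const void *src, uint64_t len)
{
	struct kbuf_copy *copy = malloc(KBUF_HDR_SIZE + len);

	if (copy != NULL) {
		copy->size = len;
		copy->offset = 0;
		memcpy(copy->kdata, src, len);
	}
	return copy;
}

static void
kbuf_copy_destroy(struct kbuf_copy *copy)
{
	/* one allocation, one free: header plus inline payload */
	free(copy);
}

int
main(void)
{
	struct kbuf_copy *c = kbuf_copy_create("abc", 3);

	if (c != NULL) {
		printf("hdr=%zu payload=%llu\n",
		    KBUF_HDR_SIZE, (unsigned long long)c->size);
		kbuf_copy_destroy(c);
	}
	return 0;
}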
*/ - (void) vm_map_remove(map, - vm_map_trunc_page(*addr), - vm_map_round_page(*addr + - vm_map_round_page(copy->size)), - VM_MAP_NO_FLAGS); + (void) vm_map_remove( + map, + vm_map_trunc_page(*addr, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page((*addr + + vm_map_round_page(copy_size, + VM_MAP_PAGE_MASK(map))), + VM_MAP_PAGE_MASK(map)), + VM_MAP_REMOVE_NO_FLAGS); *addr = 0; } } else { /* copy was successful, dicard the copy structure */ - kfree(copy, copy->cpy_kalloc_size); + if (consume_on_success) { + kfree(copy, copy_size + cpy_kdata_hdr_sz); + } } return kr; } - + /* - * Macro: vm_map_copy_insert - * + * Routine: vm_map_copy_insert [internal use only] + * * Description: * Link a copy chain ("copy") into a map at the * specified location (after "where"). * Side effects: * The copy chain is destroyed. - * Warning: - * The arguments are evaluated multiple times. */ -#define vm_map_copy_insert(map, where, copy) \ -MACRO_BEGIN \ - vm_map_store_copy_insert(map, where, copy); \ - zfree(vm_map_copy_zone, copy); \ -MACRO_END +static void +vm_map_copy_insert( + vm_map_t map, + vm_map_entry_t after_where, + vm_map_copy_t copy) +{ + vm_map_entry_t entry; + + while (vm_map_copy_first_entry(copy) != vm_map_copy_to_entry(copy)) { + entry = vm_map_copy_first_entry(copy); + vm_map_copy_entry_unlink(copy, entry); + vm_map_store_entry_link(map, after_where, entry, + VM_MAP_KERNEL_FLAGS_NONE); + after_where = entry; + } + zfree(vm_map_copy_zone, copy); +} + +void +vm_map_copy_remap( + vm_map_t map, + vm_map_entry_t where, + vm_map_copy_t copy, + vm_map_offset_t adjustment, + vm_prot_t cur_prot, + vm_prot_t max_prot, + vm_inherit_t inheritance) +{ + vm_map_entry_t copy_entry, new_entry; + + for (copy_entry = vm_map_copy_first_entry(copy); + copy_entry != vm_map_copy_to_entry(copy); + copy_entry = copy_entry->vme_next) { + /* get a new VM map entry for the map */ + new_entry = vm_map_entry_create(map, + !map->hdr.entries_pageable); + /* copy the "copy entry" to the new entry */ + vm_map_entry_copy(new_entry, copy_entry); + /* adjust "start" and "end" */ + new_entry->vme_start += adjustment; + new_entry->vme_end += adjustment; + /* clear some attributes */ + new_entry->inheritance = inheritance; + new_entry->protection = cur_prot; + new_entry->max_protection = max_prot; + new_entry->behavior = VM_BEHAVIOR_DEFAULT; + /* take an extra reference on the entry's "object" */ + if (new_entry->is_sub_map) { + assert(!new_entry->use_pmap); /* not nested */ + vm_map_lock(VME_SUBMAP(new_entry)); + vm_map_reference(VME_SUBMAP(new_entry)); + vm_map_unlock(VME_SUBMAP(new_entry)); + } else { + vm_object_reference(VME_OBJECT(new_entry)); + } + /* insert the new entry in the map */ + vm_map_store_entry_link(map, where, new_entry, + VM_MAP_KERNEL_FLAGS_NONE); + /* continue inserting the "copy entries" after the new entry */ + where = new_entry; + } +} + + +/* + * Returns true if *size matches (or is in the range of) copy->size. 
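
vm_map_copy_insert() consumes the copy by splicing its entries straight into the destination and freeing the header, whereas vm_map_copy_remap() clones each entry (shifted by an adjustment, with fresh protections) and leaves the copy intact for the caller. The sketch below contrasts the two with a plain singly linked list; ordering, locking, and object references are ignored and all names are illustrative.

/* Sketch only: "consume" splices nodes out of the copy, "remap" clones them. */
#include <stdlib.h>

struct range {
    unsigned long start, end;
    struct range *next;
};

/* Consume: move every node from *src into *dst; *src ends up empty. */
static void
splice_consume(struct range **dst, struct range **src)
{
    while (*src != NULL) {
        struct range *r = *src;
        *src = r->next;                  /* unlink from the copy */
        r->next = *dst;                  /* link into the destination */
        *dst = r;
    }
}

/* Remap: duplicate every node, shifted by "adjustment"; src is preserved.
 * On allocation failure, partially cloned nodes are left on *dst (a real
 * implementation would unwind). */
static int
splice_remap(struct range **dst, const struct range *src, unsigned long adjustment)
{
    for (; src != NULL; src = src->next) {
        struct range *r = malloc(sizeof(*r));
        if (r == NULL) {
            return -1;
        }
        r->start = src->start + adjustment;
        r->end = src->end + adjustment;
        r->next = *dst;
        *dst = r;
    }
    return 0;
}
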
+ * Upon returning true, the *size field is updated with the actual size of the + * copy object (may be different for VM_MAP_COPY_ENTRY_LIST types) + */ +boolean_t +vm_map_copy_validate_size( + vm_map_t dst_map, + vm_map_copy_t copy, + vm_map_size_t *size) +{ + if (copy == VM_MAP_COPY_NULL) { + return FALSE; + } + vm_map_size_t copy_sz = copy->size; + vm_map_size_t sz = *size; + switch (copy->type) { + case VM_MAP_COPY_OBJECT: + case VM_MAP_COPY_KERNEL_BUFFER: + if (sz == copy_sz) { + return TRUE; + } + break; + case VM_MAP_COPY_ENTRY_LIST: + /* + * potential page-size rounding prevents us from exactly + * validating this flavor of vm_map_copy, but we can at least + * assert that it's within a range. + */ + if (copy_sz >= sz && + copy_sz <= vm_map_round_page(sz, VM_MAP_PAGE_MASK(dst_map))) { + *size = copy_sz; + return TRUE; + } + break; + default: + break; + } + return FALSE; +} + +/* + * Routine: vm_map_copyout_size + * + * Description: + * Copy out a copy chain ("copy") into newly-allocated + * space in the destination map. Uses a prevalidated + * size for the copy object (vm_map_copy_validate_size). + * + * If successful, consumes the copy object. + * Otherwise, the caller is responsible for it. + */ +kern_return_t +vm_map_copyout_size( + vm_map_t dst_map, + vm_map_address_t *dst_addr, /* OUT */ + vm_map_copy_t copy, + vm_map_size_t copy_size) +{ + return vm_map_copyout_internal(dst_map, dst_addr, copy, copy_size, + TRUE, /* consume_on_success */ + VM_PROT_DEFAULT, + VM_PROT_ALL, + VM_INHERIT_DEFAULT); +} /* * Routine: vm_map_copyout @@ -7042,17 +10599,35 @@ MACRO_END */ kern_return_t vm_map_copyout( - vm_map_t dst_map, - vm_map_address_t *dst_addr, /* OUT */ - vm_map_copy_t copy) + vm_map_t dst_map, + vm_map_address_t *dst_addr, /* OUT */ + vm_map_copy_t copy) { - vm_map_size_t size; - vm_map_size_t adjustment; - vm_map_offset_t start; - vm_object_offset_t vm_copy_start; - vm_map_entry_t last; - register - vm_map_entry_t entry; + return vm_map_copyout_internal(dst_map, dst_addr, copy, copy ? copy->size : 0, + TRUE, /* consume_on_success */ + VM_PROT_DEFAULT, + VM_PROT_ALL, + VM_INHERIT_DEFAULT); +} + +kern_return_t +vm_map_copyout_internal( + vm_map_t dst_map, + vm_map_address_t *dst_addr, /* OUT */ + vm_map_copy_t copy, + vm_map_size_t copy_size, + boolean_t consume_on_success, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) +{ + vm_map_size_t size; + vm_map_size_t adjustment; + vm_map_offset_t start; + vm_object_offset_t vm_copy_start; + vm_map_entry_t last; + vm_map_entry_t entry; + vm_map_entry_t hole_entry; /* * Check for null copy object. 
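
vm_map_copy_validate_size() accepts only an exact match for object and kernel-buffer copies, but for entry-list copies it accepts any request whose page-rounded value still covers the copy's actual size, and then reports that actual size back to the caller. A simplified version of that check, assuming a power-of-two page mask, is sketched below.

/* Sketch only: size validation against a destination page mask. */
#include <stdbool.h>
#include <stdint.h>

static inline uint64_t
round_up(uint64_t x, uint64_t page_mask)
{
    return (x + page_mask) & ~page_mask;
}

static bool
validate_copy_size(uint64_t copy_size, uint64_t *requested,
    uint64_t page_mask, bool entry_list)
{
    if (!entry_list) {
        return copy_size == *requested;          /* must match exactly */
    }
    if (copy_size >= *requested &&
        copy_size <= round_up(*requested, page_mask)) {
        *requested = copy_size;                  /* report the actual size */
        return true;
    }
    return false;
}
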
@@ -7060,7 +10635,12 @@ vm_map_copyout( if (copy == VM_MAP_COPY_NULL) { *dst_addr = 0; - return(KERN_SUCCESS); + return KERN_SUCCESS; + } + + if (copy->size != copy_size) { + *dst_addr = 0; + return KERN_FAILURE; } /* @@ -7069,25 +10649,32 @@ vm_map_copyout( */ if (copy->type == VM_MAP_COPY_OBJECT) { - vm_object_t object = copy->cpy_object; - kern_return_t kr; - vm_object_offset_t offset; + vm_object_t object = copy->cpy_object; + kern_return_t kr; + vm_object_offset_t offset; offset = vm_object_trunc_page(copy->offset); - size = vm_map_round_page(copy->size + - (vm_map_size_t)(copy->offset - offset)); + size = vm_map_round_page((copy_size + + (vm_map_size_t)(copy->offset - + offset)), + VM_MAP_PAGE_MASK(dst_map)); *dst_addr = 0; kr = vm_map_enter(dst_map, dst_addr, size, - (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE, - object, offset, FALSE, - VM_PROT_DEFAULT, VM_PROT_ALL, - VM_INHERIT_DEFAULT); - if (kr != KERN_SUCCESS) - return(kr); + (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_NONE, + object, offset, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, + VM_INHERIT_DEFAULT); + if (kr != KERN_SUCCESS) { + return kr; + } /* Account for non-pagealigned copy object */ *dst_addr += (vm_map_offset_t)(copy->offset - offset); - zfree(vm_map_copy_zone, copy); - return(KERN_SUCCESS); + if (consume_on_success) { + zfree(vm_map_copy_zone, copy); + } + return KERN_SUCCESS; } /* @@ -7096,54 +10683,131 @@ vm_map_copyout( */ if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) { - return(vm_map_copyout_kernel_buffer(dst_map, dst_addr, - copy, FALSE)); + return vm_map_copyout_kernel_buffer(dst_map, dst_addr, + copy, copy_size, FALSE, + consume_on_success); } + /* * Find space for the data */ - vm_copy_start = vm_object_trunc_page(copy->offset); - size = vm_map_round_page((vm_map_size_t)copy->offset + copy->size) - - vm_copy_start; + vm_copy_start = vm_map_trunc_page((vm_map_size_t)copy->offset, + VM_MAP_COPY_PAGE_MASK(copy)); + size = vm_map_round_page((vm_map_size_t)copy->offset + copy_size, + VM_MAP_COPY_PAGE_MASK(copy)) + - vm_copy_start; + -StartAgain: ; +StartAgain:; vm_map_lock(dst_map); - if( dst_map->disable_vmentry_reuse == TRUE) { + if (dst_map->disable_vmentry_reuse == TRUE) { VM_MAP_HIGHEST_ENTRY(dst_map, entry, start); last = entry; } else { - assert(first_free_is_valid(dst_map)); - start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)) ? - vm_map_min(dst_map) : last->vme_end; + if (dst_map->holelistenabled) { + hole_entry = CAST_TO_VM_MAP_ENTRY(dst_map->holes_list); + + if (hole_entry == NULL) { + /* + * No more space in the map? + */ + vm_map_unlock(dst_map); + return KERN_NO_SPACE; + } + + last = hole_entry; + start = last->vme_start; + } else { + assert(first_free_is_valid(dst_map)); + start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)) ? 
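
For a VM_MAP_COPY_OBJECT copy whose offset is not page aligned, the code maps from the truncated offset, rounds the size up to cover the leading slop, and then adds the slop back into the returned address. The arithmetic is summarized below, assuming 4K pages for illustration.

/* Sketch only: address arithmetic for a non-page-aligned copy offset. */
#include <stdint.h>
#include <assert.h>

#define PAGE_MASK_SKETCH 0xfffULL        /* assume 4K pages for the sketch */

static uint64_t
map_unaligned(uint64_t copy_offset, uint64_t copy_size, uint64_t mapped_base)
{
    uint64_t offset = copy_offset & ~PAGE_MASK_SKETCH;              /* trunc_page */
    uint64_t slop   = copy_offset - offset;
    uint64_t size   = (copy_size + slop + PAGE_MASK_SKETCH) & ~PAGE_MASK_SKETCH;

    assert(size >= copy_size + slop);    /* the enlarged mapping covers the range */
    (void)size;                          /* "size" is what would be mapped */
    return mapped_base + slop;           /* address of the first requested byte */
}
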
+ vm_map_min(dst_map) : last->vme_end; + } + start = vm_map_round_page(start, + VM_MAP_PAGE_MASK(dst_map)); } while (TRUE) { - vm_map_entry_t next = last->vme_next; - vm_map_offset_t end = start + size; + vm_map_entry_t next = last->vme_next; + vm_map_offset_t end = start + size; if ((end > dst_map->max_offset) || (end < start)) { if (dst_map->wait_for_space) { if (size <= (dst_map->max_offset - dst_map->min_offset)) { assert_wait((event_t) dst_map, - THREAD_INTERRUPTIBLE); + THREAD_INTERRUPTIBLE); vm_map_unlock(dst_map); thread_block(THREAD_CONTINUE_NULL); goto StartAgain; } } vm_map_unlock(dst_map); - return(KERN_NO_SPACE); + return KERN_NO_SPACE; } - if ((next == vm_map_to_entry(dst_map)) || - (next->vme_start >= end)) - break; + if (dst_map->holelistenabled) { + if (last->vme_end >= end) { + break; + } + } else { + /* + * If there are no more entries, we must win. + * + * OR + * + * If there is another entry, it must be + * after the end of the potential new region. + */ + + if (next == vm_map_to_entry(dst_map)) { + break; + } + + if (next->vme_start >= end) { + break; + } + } last = next; - start = last->vme_end; + + if (dst_map->holelistenabled) { + if (last == CAST_TO_VM_MAP_ENTRY(dst_map->holes_list)) { + /* + * Wrapped around + */ + vm_map_unlock(dst_map); + return KERN_NO_SPACE; + } + start = last->vme_start; + } else { + start = last->vme_end; + } + start = vm_map_round_page(start, + VM_MAP_PAGE_MASK(dst_map)); + } + + if (dst_map->holelistenabled) { + if (vm_map_lookup_entry(dst_map, last->vme_start, &last)) { + panic("Found an existing entry (%p) instead of potential hole at address: 0x%llx.\n", last, (unsigned long long)last->vme_start); + } + } + + + adjustment = start - vm_copy_start; + if (!consume_on_success) { + /* + * We're not allowed to consume "copy", so we'll have to + * copy its map entries into the destination map below. + * No need to re-allocate map entries from the correct + * (pageable or not) zone, since we'll get new map entries + * during the transfer. + * We'll also adjust the map entries's "start" and "end" + * during the transfer, to keep "copy"'s entries consistent + * with its "offset". + */ + goto after_adjustments; } /* @@ -7157,15 +10821,13 @@ StartAgain: ; * Mismatches occur when dealing with the default * pager. */ - zone_t old_zone; - vm_map_entry_t next, new; + zone_t old_zone; + vm_map_entry_t next, new; /* * Find the zone that the copies were allocated from */ - old_zone = (copy->cpy_hdr.entries_pageable) - ? vm_map_entry_zone - : vm_map_kentry_zone; + entry = vm_map_copy_first_entry(copy); /* @@ -7179,13 +10841,19 @@ StartAgain: ; * Copy each entry. */ while (entry != vm_map_copy_to_entry(copy)) { - new = vm_map_copy_entry_create(copy); + new = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable); vm_map_entry_copy_full(new, entry); - new->use_pmap = FALSE; /* clr address space specifics */ + new->vme_no_copy_on_read = FALSE; + assert(!new->iokit_acct); + if (new->is_sub_map) { + /* clr address space specifics */ + new->use_pmap = FALSE; + } vm_map_copy_entry_link(copy, - vm_map_copy_last_entry(copy), - new); + vm_map_copy_last_entry(copy), + new); next = entry->vme_next; + old_zone = entry->from_reserved_zone ? vm_map_entry_reserved_zone : vm_map_entry_zone; zfree(old_zone, entry); entry = next; } @@ -7196,13 +10864,29 @@ StartAgain: ; * reset the region attributes. 
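
When hole lists are enabled, the free-space search walks a list of known holes rather than every map entry after first_free, and fails with KERN_NO_SPACE once the list wraps around. A first-fit scan over an array of free ranges captures the idea; the real code uses a circular entry list and re-rounds each candidate address to the map's page size.

/* Sketch only: first-fit search over a list of free ranges ("holes"). */
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

struct hole {
    uint64_t start, end;                 /* half-open free range */
};

static bool
find_space(const struct hole *holes, size_t nholes, uint64_t size,
    uint64_t *out_addr)
{
    for (size_t i = 0; i < nholes; i++) {
        if (holes[i].end - holes[i].start >= size) {
            *out_addr = holes[i].start;  /* allocate at the start of the hole */
            return true;
        }
    }
    return false;                        /* equivalent of KERN_NO_SPACE */
}
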
*/ - adjustment = start - vm_copy_start; for (entry = vm_map_copy_first_entry(copy); - entry != vm_map_copy_to_entry(copy); - entry = entry->vme_next) { + entry != vm_map_copy_to_entry(copy); + entry = entry->vme_next) { + if (VM_MAP_PAGE_SHIFT(dst_map) == PAGE_SHIFT) { + /* + * We're injecting this copy entry into a map that + * has the standard page alignment, so clear + * "map_aligned" (which might have been inherited + * from the original map entry). + */ + entry->map_aligned = FALSE; + } + entry->vme_start += adjustment; entry->vme_end += adjustment; + if (entry->map_aligned) { + assert(VM_MAP_PAGE_ALIGNED(entry->vme_start, + VM_MAP_PAGE_MASK(dst_map))); + assert(VM_MAP_PAGE_ALIGNED(entry->vme_end, + VM_MAP_PAGE_MASK(dst_map))); + } + entry->inheritance = VM_INHERIT_DEFAULT; entry->protection = VM_PROT_DEFAULT; entry->max_protection = VM_PROT_ALL; @@ -7213,23 +10897,24 @@ StartAgain: ; * map the pages into the destination map. */ if (entry->wired_count != 0) { - register vm_map_offset_t va; - vm_object_offset_t offset; - register vm_object_t object; + vm_map_offset_t va; + vm_object_offset_t offset; + vm_object_t object; vm_prot_t prot; - int type_of_fault; + int type_of_fault; - object = entry->object.vm_object; - offset = entry->offset; + object = VME_OBJECT(entry); + offset = VME_OFFSET(entry); va = entry->vme_start; pmap_pageable(dst_map->pmap, - entry->vme_start, - entry->vme_end, - TRUE); + entry->vme_start, + entry->vme_end, + TRUE); while (va < entry->vme_end) { - register vm_page_t m; + vm_page_t m; + struct vm_object_fault_info fault_info = {}; /* * Look up the page in the object. @@ -7252,29 +10937,37 @@ StartAgain: ; m = vm_page_lookup(object, offset); if (m == VM_PAGE_NULL || !VM_PAGE_WIRED(m) || - m->absent) + m->vmp_absent) { panic("vm_map_copyout: wiring %p", m); - - /* - * ENCRYPTED SWAP: - * The page is assumed to be wired here, so it - * shouldn't be encrypted. Otherwise, we - * couldn't enter it in the page table, since - * we don't want the user to see the encrypted - * data. 
- */ - ASSERT_PAGE_DECRYPTED(m); + } prot = entry->protection; - if (override_nx(dst_map, entry->alias) && prot) - prot |= VM_PROT_EXECUTE; + if (override_nx(dst_map, VME_ALIAS(entry)) && + prot) { + prot |= VM_PROT_EXECUTE; + } type_of_fault = DBG_CACHE_HIT_FAULT; - vm_fault_enter(m, dst_map->pmap, va, prot, prot, - VM_PAGE_WIRED(m), FALSE, FALSE, FALSE, - &type_of_fault); + fault_info.user_tag = VME_ALIAS(entry); + fault_info.pmap_options = 0; + if (entry->iokit_acct || + (!entry->is_sub_map && !entry->use_pmap)) { + fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT; + } + + vm_fault_enter(m, + dst_map->pmap, + va, + prot, + prot, + VM_PAGE_WIRED(m), + FALSE, /* change_wiring */ + VM_KERN_MEMORY_NONE, /* tag - not wiring */ + &fault_info, + NULL, /* need_retry */ + &type_of_fault); vm_object_unlock(object); @@ -7284,17 +10977,27 @@ StartAgain: ; } } +after_adjustments: + /* * Correct the page alignment for the result */ *dst_addr = start + (copy->offset - vm_copy_start); +#if KASAN + kasan_notify_address(*dst_addr, size); +#endif + /* * Update the hints and the map size */ - SAVE_HINT_MAP_WRITE(dst_map, vm_map_copy_last_entry(copy)); + if (consume_on_success) { + SAVE_HINT_MAP_WRITE(dst_map, vm_map_copy_last_entry(copy)); + } else { + SAVE_HINT_MAP_WRITE(dst_map, last); + } dst_map->size += size; @@ -7302,7 +11005,13 @@ StartAgain: ; * Link in the copy */ - vm_map_copy_insert(dst_map, last, copy); + if (consume_on_success) { + vm_map_copy_insert(dst_map, last, copy); + } else { + vm_map_copy_remap(dst_map, last, copy, adjustment, + cur_protection, max_protection, + inheritance); + } vm_map_unlock(dst_map); @@ -7310,7 +11019,7 @@ StartAgain: ; * XXX If wiring_required, call vm_map_pageable */ - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -7325,14 +11034,14 @@ StartAgain: ; kern_return_t vm_map_copyin( - vm_map_t src_map, - vm_map_address_t src_addr, - vm_map_size_t len, - boolean_t src_destroy, - vm_map_copy_t *copy_result) /* OUT */ + vm_map_t src_map, + vm_map_address_t src_addr, + vm_map_size_t len, + boolean_t src_destroy, + vm_map_copy_t *copy_result) /* OUT */ { - return(vm_map_copyin_common(src_map, src_addr, len, src_destroy, - FALSE, copy_result, FALSE)); + return vm_map_copyin_common(src_map, src_addr, len, src_destroy, + FALSE, copy_result, FALSE); } /* @@ -7355,44 +11064,80 @@ vm_map_copyin( */ typedef struct submap_map { - vm_map_t parent_map; - vm_map_offset_t base_start; - vm_map_offset_t base_end; - vm_map_size_t base_len; + vm_map_t parent_map; + vm_map_offset_t base_start; + vm_map_offset_t base_end; + vm_map_size_t base_len; struct submap_map *next; } submap_map_t; kern_return_t vm_map_copyin_common( - vm_map_t src_map, + vm_map_t src_map, vm_map_address_t src_addr, - vm_map_size_t len, - boolean_t src_destroy, - __unused boolean_t src_volatile, - vm_map_copy_t *copy_result, /* OUT */ - boolean_t use_maxprot) -{ - vm_map_entry_t tmp_entry; /* Result of last map lookup -- - * in multi-level lookup, this - * entry contains the actual - * vm_object/offset. 
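
Wired entries in the copy are not left to demand faulting: at copyout time the loop above walks the range a page at a time and enters each page into the destination right away. The sketch below shows that eager per-page loop, with a callback standing in for the vm_page_lookup()/vm_fault_enter() pair and an assumed 4K page size.

/* Sketch only: eagerly establish every page of a wired range. */
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE_SKETCH 4096u

static size_t
enter_wired_range(uint64_t start, uint64_t end,
    void (*enter_page)(uint64_t va))
{
    size_t entered = 0;
    for (uint64_t va = start; va < end; va += PAGE_SIZE_SKETCH) {
        enter_page(va);                  /* stands in for the per-page fault-enter */
        entered++;
    }
    return entered;
}
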
- */ - register - vm_map_entry_t new_entry = VM_MAP_ENTRY_NULL; /* Map entry for copy */ + vm_map_size_t len, + boolean_t src_destroy, + __unused boolean_t src_volatile, + vm_map_copy_t *copy_result, /* OUT */ + boolean_t use_maxprot) +{ + int flags; - vm_map_offset_t src_start; /* Start of current entry -- - * where copy is taking place now - */ - vm_map_offset_t src_end; /* End of entire region to be - * copied */ + flags = 0; + if (src_destroy) { + flags |= VM_MAP_COPYIN_SRC_DESTROY; + } + if (use_maxprot) { + flags |= VM_MAP_COPYIN_USE_MAXPROT; + } + return vm_map_copyin_internal(src_map, + src_addr, + len, + flags, + copy_result); +} +kern_return_t +vm_map_copyin_internal( + vm_map_t src_map, + vm_map_address_t src_addr, + vm_map_size_t len, + int flags, + vm_map_copy_t *copy_result) /* OUT */ +{ + vm_map_entry_t tmp_entry; /* Result of last map lookup -- + * in multi-level lookup, this + * entry contains the actual + * vm_object/offset. + */ + vm_map_entry_t new_entry = VM_MAP_ENTRY_NULL; /* Map entry for copy */ + + vm_map_offset_t src_start; /* Start of current entry -- + * where copy is taking place now + */ + vm_map_offset_t src_end; /* End of entire region to be + * copied */ vm_map_offset_t src_base; - vm_map_t base_map = src_map; - boolean_t map_share=FALSE; - submap_map_t *parent_maps = NULL; + vm_map_t base_map = src_map; + boolean_t map_share = FALSE; + submap_map_t *parent_maps = NULL; + + vm_map_copy_t copy; /* Resulting copy */ + vm_map_address_t copy_addr; + vm_map_size_t copy_size; + boolean_t src_destroy; + boolean_t use_maxprot; + boolean_t preserve_purgeable; + boolean_t entry_was_shared; + vm_map_entry_t saved_src_entry; + + if (flags & ~VM_MAP_COPYIN_ALL_FLAGS) { + return KERN_INVALID_ARGUMENT; + } - register - vm_map_copy_t copy; /* Resulting copy */ - vm_map_address_t copy_addr; + src_destroy = (flags & VM_MAP_COPYIN_SRC_DESTROY) ? TRUE : FALSE; + use_maxprot = (flags & VM_MAP_COPYIN_USE_MAXPROT) ? TRUE : FALSE; + preserve_purgeable = + (flags & VM_MAP_COPYIN_PRESERVE_PURGEABLE) ? TRUE : FALSE; /* * Check for copies of zero bytes. @@ -7400,15 +11145,24 @@ vm_map_copyin_common( if (len == 0) { *copy_result = VM_MAP_COPY_NULL; - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* * Check that the end address doesn't overflow */ src_end = src_addr + len; - if (src_end < src_addr) + if (src_end < src_addr) { return KERN_INVALID_ADDRESS; + } + + /* + * Compute (page aligned) start and end of region + */ + src_start = vm_map_trunc_page(src_addr, + VM_MAP_PAGE_MASK(src_map)); + src_end = vm_map_round_page(src_end, + VM_MAP_PAGE_MASK(src_map)); /* * If the copy is sufficiently small, use a kernel buffer instead @@ -7416,71 +11170,105 @@ vm_map_copyin_common( * setting up VM (and taking C-O-W faults) dominates the copy costs * for small regions. */ - if ((len < msg_ool_size_small) && !use_maxprot) + if ((len < msg_ool_size_small) && + !use_maxprot && + !preserve_purgeable && + !(flags & VM_MAP_COPYIN_ENTRY_LIST) && + /* + * Since the "msg_ool_size_small" threshold was increased and + * vm_map_copyin_kernel_buffer() doesn't handle accesses beyond the + * address space limits, we revert to doing a virtual copy if the + * copied range goes beyond those limits. Otherwise, mach_vm_read() + * of the commpage would now fail when it used to work. 
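
vm_map_copyin_common() is now a thin wrapper that folds its boolean arguments into VM_MAP_COPYIN_* flag bits and defers to vm_map_copyin_internal(). The pattern is sketched below with illustrative flag names and a stub worker.

/* Sketch only: boolean arguments folded into a flags word. */
#include <stdbool.h>

enum {
    COPYIN_SRC_DESTROY = 0x1,
    COPYIN_USE_MAXPROT = 0x2,
};

static int
copyin_internal(unsigned long addr, unsigned long len, int flags)
{
    (void)addr; (void)len; (void)flags;
    return 0;                            /* stand-in for the real worker */
}

static int
copyin_common(unsigned long addr, unsigned long len,
    bool src_destroy, bool use_maxprot)
{
    int flags = 0;
    if (src_destroy) {
        flags |= COPYIN_SRC_DESTROY;
    }
    if (use_maxprot) {
        flags |= COPYIN_USE_MAXPROT;
    }
    return copyin_internal(addr, len, flags);
}
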
+ */ + (src_start >= vm_map_min(src_map) && + src_start < vm_map_max(src_map) && + src_end >= vm_map_min(src_map) && + src_end < vm_map_max(src_map))) { return vm_map_copyin_kernel_buffer(src_map, src_addr, len, - src_destroy, copy_result); - - /* - * Compute (page aligned) start and end of region - */ - src_start = vm_map_trunc_page(src_addr); - src_end = vm_map_round_page(src_end); - - XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", src_map, src_addr, len, src_destroy, 0); + src_destroy, copy_result); + } /* * Allocate a header element for the list. * - * Use the start and end in the header to + * Use the start and end in the header to * remember the endpoints prior to rounding. */ - copy = (vm_map_copy_t) zalloc(vm_map_copy_zone); - vm_map_copy_first_entry(copy) = - vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy); + copy = vm_map_copy_allocate(); copy->type = VM_MAP_COPY_ENTRY_LIST; - copy->cpy_hdr.nentries = 0; copy->cpy_hdr.entries_pageable = TRUE; +#if 00 + copy->cpy_hdr.page_shift = src_map->hdr.page_shift; +#else + /* + * The copy entries can be broken down for a variety of reasons, + * so we can't guarantee that they will remain map-aligned... + * Will need to adjust the first copy_entry's "vme_start" and + * the last copy_entry's "vme_end" to be rounded to PAGE_MASK + * rather than the original map's alignment. + */ + copy->cpy_hdr.page_shift = PAGE_SHIFT; +#endif - vm_map_store_init( &(copy->cpy_hdr) ); + vm_map_store_init( &(copy->cpy_hdr)); copy->offset = src_addr; copy->size = len; - - new_entry = vm_map_copy_entry_create(copy); - -#define RETURN(x) \ - MACRO_BEGIN \ - vm_map_unlock(src_map); \ - if(src_map != base_map) \ - vm_map_deallocate(src_map); \ - if (new_entry != VM_MAP_ENTRY_NULL) \ - vm_map_copy_entry_dispose(copy,new_entry); \ - vm_map_copy_discard(copy); \ - { \ - submap_map_t *_ptr; \ - \ - for(_ptr = parent_maps; _ptr != NULL; _ptr = parent_maps) { \ - parent_maps=parent_maps->next; \ - if (_ptr->parent_map != base_map) \ - vm_map_deallocate(_ptr->parent_map); \ - kfree(_ptr, sizeof(submap_map_t)); \ - } \ - } \ - MACRO_RETURN(x); \ + + new_entry = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable); + +#define RETURN(x) \ + MACRO_BEGIN \ + vm_map_unlock(src_map); \ + if(src_map != base_map) \ + vm_map_deallocate(src_map); \ + if (new_entry != VM_MAP_ENTRY_NULL) \ + vm_map_copy_entry_dispose(copy,new_entry); \ + vm_map_copy_discard(copy); \ + { \ + submap_map_t *_ptr; \ + \ + for(_ptr = parent_maps; _ptr != NULL; _ptr = parent_maps) { \ + parent_maps=parent_maps->next; \ + if (_ptr->parent_map != base_map) \ + vm_map_deallocate(_ptr->parent_map); \ + kfree(_ptr, sizeof(submap_map_t)); \ + } \ + } \ + MACRO_RETURN(x); \ MACRO_END /* * Find the beginning of the region. */ - vm_map_lock(src_map); + vm_map_lock(src_map); - if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) + /* + * Lookup the original "src_addr" rather than the truncated + * "src_start", in case "src_start" falls in a non-map-aligned + * map entry *before* the map entry that contains "src_addr"... + */ + if (!vm_map_lookup_entry(src_map, src_addr, &tmp_entry)) { RETURN(KERN_INVALID_ADDRESS); - if(!tmp_entry->is_sub_map) { + } + if (!tmp_entry->is_sub_map) { + /* + * ... but clip to the map-rounded "src_start" rather than + * "src_addr" to preserve map-alignment. We'll adjust the + * first copy entry at the end, if needed. 
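
The kernel-buffer fast path is taken only for small requests whose page-rounded range also lies entirely within the source map's valid address range, since the buffer path cannot reach beyond those limits (this is why mach_vm_read() of the commpage keeps working). A simplified guard predicate is shown below; the size threshold is an assumed value.

/* Sketch only: guard on the kernel-buffer fast path. */
#include <stdint.h>
#include <stdbool.h>

#define SMALL_COPY_LIMIT (32 * 1024)     /* stands in for msg_ool_size_small */

static bool
use_kernel_buffer_path(uint64_t len, uint64_t start, uint64_t end,
    uint64_t map_min, uint64_t map_max)
{
    return len < SMALL_COPY_LIMIT &&
           start >= map_min && start < map_max &&
           end >= map_min && end < map_max;
}
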
+ */ vm_map_clip_start(src_map, tmp_entry, src_start); } + if (src_start < tmp_entry->vme_start) { + /* + * Move "src_start" up to the start of the + * first map entry to copy. + */ + src_start = tmp_entry->vme_start; + } /* set for later submap fix-up */ copy_addr = src_start; @@ -7489,32 +11277,30 @@ vm_map_copyin_common( */ while (TRUE) { - register - vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */ - vm_map_size_t src_size; /* Size of source - * map entry (in both - * maps) - */ - - register - vm_object_t src_object; /* Object to copy */ - vm_object_offset_t src_offset; - - boolean_t src_needs_copy; /* Should source map - * be made read-only - * for copy-on-write? - */ - - boolean_t new_entry_needs_copy; /* Will new entry be COW? */ - - boolean_t was_wired; /* Was source wired? */ - vm_map_version_t version; /* Version before locks - * dropped to make copy - */ - kern_return_t result; /* Return value from - * copy_strategically. - */ - while(tmp_entry->is_sub_map) { + vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */ + vm_map_size_t src_size; /* Size of source + * map entry (in both + * maps) + */ + + vm_object_t src_object; /* Object to copy */ + vm_object_offset_t src_offset; + + boolean_t src_needs_copy; /* Should source map + * be made read-only + * for copy-on-write? + */ + + boolean_t new_entry_needs_copy; /* Will new entry be COW? */ + + boolean_t was_wired; /* Was source wired? */ + vm_map_version_t version; /* Version before locks + * dropped to make copy + */ + kern_return_t result; /* Return value from + * copy_strategically. + */ + while (tmp_entry->is_sub_map) { vm_map_size_t submap_len; submap_map_t *ptr; @@ -7525,31 +11311,34 @@ vm_map_copyin_common( ptr->base_start = src_start; ptr->base_end = src_end; submap_len = tmp_entry->vme_end - src_start; - if(submap_len > (src_end-src_start)) - submap_len = src_end-src_start; + if (submap_len > (src_end - src_start)) { + submap_len = src_end - src_start; + } ptr->base_len = submap_len; - + src_start -= tmp_entry->vme_start; - src_start += tmp_entry->offset; + src_start += VME_OFFSET(tmp_entry); src_end = src_start + submap_len; - src_map = tmp_entry->object.sub_map; + src_map = VME_SUBMAP(tmp_entry); vm_map_lock(src_map); /* keep an outstanding reference for all maps in */ /* the parents tree except the base map */ vm_map_reference(src_map); vm_map_unlock(ptr->parent_map); if (!vm_map_lookup_entry( - src_map, src_start, &tmp_entry)) + src_map, src_start, &tmp_entry)) { RETURN(KERN_INVALID_ADDRESS); + } map_share = TRUE; - if(!tmp_entry->is_sub_map) + if (!tmp_entry->is_sub_map) { vm_map_clip_start(src_map, tmp_entry, src_start); + } src_entry = tmp_entry; } /* we are now in the lowest level submap... */ - if ((tmp_entry->object.vm_object != VM_OBJECT_NULL) && - (tmp_entry->object.vm_object->phys_contiguous)) { + if ((VME_OBJECT(tmp_entry) != VM_OBJECT_NULL) && + (VME_OBJECT(tmp_entry)->phys_contiguous)) { /* This is not, supported for now.In future */ /* we will need to detect the phys_contig */ /* condition and then upgrade copy_slowly */ @@ -7560,7 +11349,7 @@ vm_map_copyin_common( RETURN(KERN_PROTECTION_FAILURE); } /* - * Create a new address map entry to hold the result. + * Create a new address map entry to hold the result. * Fill in the fields from the appropriate source entries. * We must unlock the source map to do this if we need * to allocate a map entry. 
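
Descending into a submap saves the parent map's coordinates on a small stack frame and rebases the working range into the submap's address space via the entry's offset before the walk continues one level down. The sketch below shows one descent step; it omits the clamp of the length to the submap entry's end and the locking and reference handling that the real code performs.

/* Sketch only: one step of the submap descent. */
#include <stdlib.h>
#include <stdint.h>

struct submap_frame {
    struct submap_frame *next;
    uint64_t base_start, base_end;
};

static struct submap_frame *
descend(struct submap_frame *stack, uint64_t *start, uint64_t *end,
    uint64_t entry_start, uint64_t entry_offset)
{
    struct submap_frame *f = malloc(sizeof(*f));
    if (f == NULL) {
        return stack;                    /* sketch: ignore allocation failure */
    }
    f->base_start = *start;              /* remember the parent-map range */
    f->base_end = *end;
    f->next = stack;

    uint64_t len = *end - *start;        /* real code also clamps to the entry end */
    *start = (*start - entry_start) + entry_offset;   /* rebase into the submap */
    *end = *start + len;
    return f;
}
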
@@ -7569,16 +11358,17 @@ vm_map_copyin_common( version.main_timestamp = src_map->timestamp; vm_map_unlock(src_map); - new_entry = vm_map_copy_entry_create(copy); + new_entry = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable); vm_map_lock(src_map); if ((version.main_timestamp + 1) != src_map->timestamp) { if (!vm_map_lookup_entry(src_map, src_start, - &tmp_entry)) { + &tmp_entry)) { RETURN(KERN_INVALID_ADDRESS); } - if (!tmp_entry->is_sub_map) + if (!tmp_entry->is_sub_map) { vm_map_clip_start(src_map, tmp_entry, src_start); + } continue; /* restart w/ new tmp_entry */ } } @@ -7587,9 +11377,10 @@ vm_map_copyin_common( * Verify that the region can be read. */ if (((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE && - !use_maxprot) || - (src_entry->max_protection & VM_PROT_READ) == 0) + !use_maxprot) || + (src_entry->max_protection & VM_PROT_READ) == 0) { RETURN(KERN_PROTECTION_FAILURE); + } /* * Clip against the endpoints of the entire region. @@ -7598,53 +11389,57 @@ vm_map_copyin_common( vm_map_clip_end(src_map, src_entry, src_end); src_size = src_entry->vme_end - src_start; - src_object = src_entry->object.vm_object; - src_offset = src_entry->offset; + src_object = VME_OBJECT(src_entry); + src_offset = VME_OFFSET(src_entry); was_wired = (src_entry->wired_count != 0); vm_map_entry_copy(new_entry, src_entry); - new_entry->use_pmap = FALSE; /* clr address space specifics */ + if (new_entry->is_sub_map) { + /* clr address space specifics */ + new_entry->use_pmap = FALSE; + } else { + /* + * We're dealing with a copy-on-write operation, + * so the resulting mapping should not inherit the + * original mapping's accounting settings. + * "iokit_acct" should have been cleared in + * vm_map_entry_copy(). + * "use_pmap" should be reset to its default (TRUE) + * so that the new mapping gets accounted for in + * the task's memory footprint. + */ + assert(!new_entry->iokit_acct); + new_entry->use_pmap = TRUE; + } /* * Attempt non-blocking copy-on-write optimizations. */ - if (src_destroy && - (src_object == VM_OBJECT_NULL || - (src_object->internal && !src_object->true_share - && !map_share))) { - /* - * If we are destroying the source, and the object - * is internal, we can move the object reference - * from the source to the copy. The copy is - * copy-on-write only if the source is. - * We make another reference to the object, because - * destroying the source entry will deallocate it. - */ - vm_object_reference(src_object); - - /* - * Copy is always unwired. vm_map_copy_entry - * set its wired count to zero. - */ - - goto CopySuccessful; - } - + /* + * If we are destroying the source, and the object + * is internal, we could move the object reference + * from the source to the copy. The copy is + * copy-on-write only if the source is. + * We make another reference to the object, because + * destroying the source entry will deallocate it. + * + * This memory transfer has to be atomic, (to prevent + * the VM object from being shared or copied while + * it's being moved here), so we could only do this + * if we won't have to unlock the VM map until the + * original mapping has been fully removed. 
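
Each source entry must be readable before it is copied: normally under its current protection, or under its maximum protection when the caller asked for the max-protection variant. The predicate below mirrors that check with an illustrative protection bit.

/* Sketch only: readability test for a source entry. */
#include <stdbool.h>

#define PROT_READ_BIT 0x1

static bool
copy_source_readable(int cur_prot, int max_prot, bool use_maxprot)
{
    if ((max_prot & PROT_READ_BIT) == 0) {
        return false;                    /* never readable: protection failure */
    }
    if (!use_maxprot && (cur_prot & PROT_READ_BIT) == 0) {
        return false;                    /* currently unreadable and caller did
                                          * not ask for max protection */
    }
    return true;
}
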
+ */ - RestartCopy: - XPR(XPR_VM_MAP, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n", - src_object, new_entry, new_entry->object.vm_object, - was_wired, 0); +RestartCopy: if ((src_object == VM_OBJECT_NULL || - (!was_wired && !map_share && !tmp_entry->is_shared)) && + (!was_wired && !map_share && !tmp_entry->is_shared)) && vm_object_copy_quickly( - &new_entry->object.vm_object, + VME_OBJECT_PTR(new_entry), src_offset, src_size, &src_needs_copy, &new_entry_needs_copy)) { - new_entry->needs_copy = new_entry_needs_copy; /* @@ -7652,23 +11447,26 @@ vm_map_copyin_common( */ if (src_needs_copy && !tmp_entry->needs_copy) { - vm_prot_t prot; + vm_prot_t prot; prot = src_entry->protection & ~VM_PROT_WRITE; - if (override_nx(src_map, src_entry->alias) && prot) - prot |= VM_PROT_EXECUTE; + if (override_nx(src_map, VME_ALIAS(src_entry)) + && prot) { + prot |= VM_PROT_EXECUTE; + } vm_object_pmap_protect( src_object, src_offset, src_size, - (src_entry->is_shared ? - PMAP_NULL - : src_map->pmap), + (src_entry->is_shared ? + PMAP_NULL + : src_map->pmap), src_entry->vme_start, prot); + assert(tmp_entry->wired_count == 0); tmp_entry->needs_copy = TRUE; } @@ -7681,6 +11479,8 @@ vm_map_copyin_common( goto CopySuccessful; } + entry_was_shared = tmp_entry->is_shared; + /* * Take an object reference, so that we may * release the map lock(s). @@ -7695,53 +11495,96 @@ vm_map_copyin_common( */ version.main_timestamp = src_map->timestamp; - vm_map_unlock(src_map); /* Increments timestamp once! */ + vm_map_unlock(src_map); /* Increments timestamp once! */ + saved_src_entry = src_entry; + tmp_entry = VM_MAP_ENTRY_NULL; + src_entry = VM_MAP_ENTRY_NULL; /* * Perform the copy */ if (was_wired) { - CopySlowly: +CopySlowly: vm_object_lock(src_object); result = vm_object_copy_slowly( src_object, src_offset, src_size, THREAD_UNINT, - &new_entry->object.vm_object); - new_entry->offset = 0; + VME_OBJECT_PTR(new_entry)); + VME_OFFSET_SET(new_entry, 0); new_entry->needs_copy = FALSE; - - } - else if (src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && - (tmp_entry->is_shared || map_share)) { - vm_object_t new_object; + } else if (src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && + (entry_was_shared || map_share)) { + vm_object_t new_object; vm_object_lock_shared(src_object); new_object = vm_object_copy_delayed( src_object, - src_offset, + src_offset, src_size, TRUE); - if (new_object == VM_OBJECT_NULL) - goto CopySlowly; + if (new_object == VM_OBJECT_NULL) { + goto CopySlowly; + } - new_entry->object.vm_object = new_object; + VME_OBJECT_SET(new_entry, new_object); + assert(new_entry->wired_count == 0); new_entry->needs_copy = TRUE; + assert(!new_entry->iokit_acct); + assert(new_object->purgable == VM_PURGABLE_DENY); + assertf(new_entry->use_pmap, "src_map %p new_entry %p\n", src_map, new_entry); result = KERN_SUCCESS; - } else { + vm_object_offset_t new_offset; + new_offset = VME_OFFSET(new_entry); result = vm_object_copy_strategically(src_object, - src_offset, - src_size, - &new_entry->object.vm_object, - &new_entry->offset, - &new_entry_needs_copy); + src_offset, + src_size, + VME_OBJECT_PTR(new_entry), + &new_offset, + &new_entry_needs_copy); + if (new_offset != VME_OFFSET(new_entry)) { + VME_OFFSET_SET(new_entry, new_offset); + } new_entry->needs_copy = new_entry_needs_copy; } + if (result == KERN_SUCCESS && + preserve_purgeable && + src_object->purgable != VM_PURGABLE_DENY) { + vm_object_t new_object; + + new_object = VME_OBJECT(new_entry); + assert(new_object != src_object); + 
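
The quick path marks both the source and the new entry copy-on-write: the destination is flagged needs_copy, and if the source was not already flagged, its physical mappings are write-protected so the first store on either side faults and gets a private page. A simplified model of that marking follows; the protection constants and structure are illustrative, and the real code protects through the pmap rather than changing the entry's protection field.

/* Sketch only: symmetric copy-on-write marking. */
#include <stdbool.h>

#define PROT_READ_  0x1
#define PROT_WRITE_ 0x2

struct mapping {
    int prot;
    bool needs_copy;
};

static void
mark_cow(struct mapping *src, struct mapping *dst)
{
    dst->needs_copy = true;
    if (!src->needs_copy) {
        src->prot &= ~PROT_WRITE_;       /* stands in for write-protecting the
                                          * source's hardware mappings */
        src->needs_copy = true;
    }
}
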
vm_object_lock(new_object); + assert(new_object->ref_count == 1); + assert(new_object->shadow == VM_OBJECT_NULL); + assert(new_object->copy == VM_OBJECT_NULL); + assert(new_object->vo_owner == NULL); + + new_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; + new_object->true_share = TRUE; + /* start as non-volatile with no owner... */ + new_object->purgable = VM_PURGABLE_NONVOLATILE; + vm_purgeable_nonvolatile_enqueue(new_object, NULL); + /* ... and move to src_object's purgeable state */ + if (src_object->purgable != VM_PURGABLE_NONVOLATILE) { + int state; + state = src_object->purgable; + vm_object_purgable_control( + new_object, + VM_PURGABLE_SET_STATE_FROM_KERNEL, + &state); + } + vm_object_unlock(new_object); + new_object = VM_OBJECT_NULL; + /* no pmap accounting for purgeable objects */ + new_entry->use_pmap = FALSE; + } + if (result != KERN_SUCCESS && result != KERN_MEMORY_RESTART_COPY) { vm_map_lock(src_map); @@ -7761,8 +11604,11 @@ vm_map_copyin_common( vm_map_lock(src_map); - if ((version.main_timestamp + 1) == src_map->timestamp) + if ((version.main_timestamp + 1) == src_map->timestamp) { + /* src_map hasn't changed: src_entry is still valid */ + src_entry = saved_src_entry; goto VerificationSuccessful; + } /* * Simple version comparison failed. @@ -7777,6 +11623,13 @@ vm_map_copyin_common( */ if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) { + if (result != KERN_MEMORY_RESTART_COPY) { + vm_object_deallocate(VME_OBJECT(new_entry)); + VME_OBJECT_SET(new_entry, VM_OBJECT_NULL); + /* reset accounting state */ + new_entry->iokit_acct = FALSE; + new_entry->use_pmap = TRUE; + } RETURN(KERN_INVALID_ADDRESS); } @@ -7784,25 +11637,54 @@ vm_map_copyin_common( vm_map_clip_start(src_map, src_entry, src_start); if ((((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE) && - !use_maxprot) || - ((src_entry->max_protection & VM_PROT_READ) == 0)) + !use_maxprot) || + ((src_entry->max_protection & VM_PROT_READ) == 0)) { goto VerificationFailed; + } - if (src_entry->vme_end < new_entry->vme_end) - src_size = (new_entry->vme_end = src_entry->vme_end) - src_start; - - if ((src_entry->object.vm_object != src_object) || - (src_entry->offset != src_offset) ) { - + if (src_entry->vme_end < new_entry->vme_end) { /* - * Verification failed. + * This entry might have been shortened + * (vm_map_clip_end) or been replaced with + * an entry that ends closer to "src_start" + * than before. + * Adjust "new_entry" accordingly; copying + * less memory would be correct but we also + * redo the copy (see below) if the new entry + * no longer points at the same object/offset. + */ + assert(VM_MAP_PAGE_ALIGNED(src_entry->vme_end, + VM_MAP_COPY_PAGE_MASK(copy))); + new_entry->vme_end = src_entry->vme_end; + src_size = new_entry->vme_end - src_start; + } else if (src_entry->vme_end > new_entry->vme_end) { + /* + * This entry might have been extended + * (vm_map_entry_simplify() or coalesce) + * or been replaced with an entry that ends farther + * from "src_start" than before. * - * Start over with this top-level entry. + * We've called vm_object_copy_*() only on + * the previous range, so we can't + * just extend new_entry. We have to re-do + * the copy based on the new entry as if it was + * pointing at a different object/offset (see + * "Verification failed" below). + */ + } + + if ((VME_OBJECT(src_entry) != src_object) || + (VME_OFFSET(src_entry) != src_offset) || + (src_entry->vme_end > new_entry->vme_end)) { + /* + * Verification failed. + * + * Start over with this top-level entry. 
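
Because the object copy runs with the map unlocked, the code records the map timestamp first and, after relocking, accepts the cached entry only if the timestamp advanced by exactly the one unlock it performed; otherwise it re-looks-up the entry and may redo the copy. The single-threaded sketch below shows that record / work / validate loop; the versioned structure and helpers are illustrative.

/* Sketch only: optimistic "record version, drop lock, re-validate" loop. */
#include <stdint.h>

struct versioned {
    uint64_t timestamp;                  /* bumped on every unlock */
};

static void
do_expensive_copy(void)
{
    /* placeholder for the object copy done without the map lock held */
}

static void
copy_with_validation(struct versioned *map)
{
    for (;;) {
        uint64_t saved = map->timestamp;
        map->timestamp++;                /* unlocking bumps the version once */
        do_expensive_copy();
        /* ...relock, then validate: */
        if (map->timestamp == saved + 1) {
            return;                      /* nobody else modified the map */
        }
        /* stale: re-lookup the entry and redo the copy */
    }
}
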
*/ - VerificationFailed: ; +VerificationFailed: ; - vm_object_deallocate(new_entry->object.vm_object); + vm_object_deallocate(VME_OBJECT(new_entry)); tmp_entry = src_entry; continue; } @@ -7811,24 +11693,25 @@ vm_map_copyin_common( * Verification succeeded. */ - VerificationSuccessful: ; +VerificationSuccessful:; - if (result == KERN_MEMORY_RESTART_COPY) + if (result == KERN_MEMORY_RESTART_COPY) { goto RestartCopy; + } /* * Copy succeeded. */ - CopySuccessful: ; +CopySuccessful: ; /* * Link in the new copy entry. */ vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy), - new_entry); - + new_entry); + /* * Determine whether the entire region * has been copied. @@ -7837,87 +11720,328 @@ vm_map_copyin_common( src_start = new_entry->vme_end; new_entry = VM_MAP_ENTRY_NULL; while ((src_start >= src_end) && (src_end != 0)) { - if (src_map != base_map) { - submap_map_t *ptr; - - ptr = parent_maps; - assert(ptr != NULL); - parent_maps = parent_maps->next; - - /* fix up the damage we did in that submap */ - vm_map_simplify_range(src_map, - src_base, - src_end); - - vm_map_unlock(src_map); - vm_map_deallocate(src_map); - vm_map_lock(ptr->parent_map); - src_map = ptr->parent_map; - src_base = ptr->base_start; - src_start = ptr->base_start + ptr->base_len; - src_end = ptr->base_end; - if ((src_end > src_start) && - !vm_map_lookup_entry( - src_map, src_start, &tmp_entry)) - RETURN(KERN_INVALID_ADDRESS); - kfree(ptr, sizeof(submap_map_t)); - if(parent_maps == NULL) - map_share = FALSE; - src_entry = tmp_entry->vme_prev; - } else + submap_map_t *ptr; + + if (src_map == base_map) { + /* back to the top */ break; + } + + ptr = parent_maps; + assert(ptr != NULL); + parent_maps = parent_maps->next; + + /* fix up the damage we did in that submap */ + vm_map_simplify_range(src_map, + src_base, + src_end); + + vm_map_unlock(src_map); + vm_map_deallocate(src_map); + vm_map_lock(ptr->parent_map); + src_map = ptr->parent_map; + src_base = ptr->base_start; + src_start = ptr->base_start + ptr->base_len; + src_end = ptr->base_end; + if (!vm_map_lookup_entry(src_map, + src_start, + &tmp_entry) && + (src_end > src_start)) { + RETURN(KERN_INVALID_ADDRESS); + } + kfree(ptr, sizeof(submap_map_t)); + if (parent_maps == NULL) { + map_share = FALSE; + } + src_entry = tmp_entry->vme_prev; + } + + if ((VM_MAP_PAGE_SHIFT(src_map) != PAGE_SHIFT) && + (src_start >= src_addr + len) && + (src_addr + len != 0)) { + /* + * Stop copying now, even though we haven't reached + * "src_end". We'll adjust the end of the last copy + * entry at the end, if needed. + * + * If src_map's aligment is different from the + * system's page-alignment, there could be + * extra non-map-aligned map entries between + * the original (non-rounded) "src_addr + len" + * and the rounded "src_end". + * We do not want to copy those map entries since + * they're not part of the copied range. + */ + break; } - if ((src_start >= src_end) && (src_end != 0)) + + if ((src_start >= src_end) && (src_end != 0)) { break; + } /* * Verify that there are no gaps in the region */ tmp_entry = src_entry->vme_next; - if ((tmp_entry->vme_start != src_start) || - (tmp_entry == vm_map_to_entry(src_map))) + if ((tmp_entry->vme_start != src_start) || + (tmp_entry == vm_map_to_entry(src_map))) { RETURN(KERN_INVALID_ADDRESS); + } } /* * If the source should be destroyed, do it now, since the - * copy was successful. + * copy was successful. */ if (src_destroy) { - (void) vm_map_delete(src_map, - vm_map_trunc_page(src_addr), - src_end, - (src_map == kernel_map) ? 
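
When a submap's portion of the range has been copied, the walk pops the saved frame, restores the parent map's coordinates, and resumes just past the region the submap covered. The sketch below is the counterpart of the descent step shown earlier; reference dropping and the simplify pass are omitted, and the caller must only pop a non-empty stack.

/* Sketch only: leaving a submap and resuming in the parent map. */
#include <stdlib.h>
#include <stdint.h>

struct submap_frame {
    struct submap_frame *next;
    uint64_t base_start, base_end, base_len;
};

static struct submap_frame *
ascend(struct submap_frame *stack, uint64_t *start, uint64_t *end)
{
    struct submap_frame *f = stack;
    stack = f->next;
    *start = f->base_start + f->base_len;  /* resume after the submap range */
    *end = f->base_end;
    free(f);
    return stack;
}
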
- VM_MAP_REMOVE_KUNWIRE : - VM_MAP_NO_FLAGS, - VM_MAP_NULL); + (void) vm_map_delete( + src_map, + vm_map_trunc_page(src_addr, + VM_MAP_PAGE_MASK(src_map)), + src_end, + ((src_map == kernel_map) ? + VM_MAP_REMOVE_KUNWIRE : + VM_MAP_REMOVE_NO_FLAGS), + VM_MAP_NULL); } else { /* fix up the damage we did in the base map */ - vm_map_simplify_range(src_map, - vm_map_trunc_page(src_addr), - vm_map_round_page(src_end)); + vm_map_simplify_range( + src_map, + vm_map_trunc_page(src_addr, + VM_MAP_PAGE_MASK(src_map)), + vm_map_round_page(src_end, + VM_MAP_PAGE_MASK(src_map))); } vm_map_unlock(src_map); + tmp_entry = VM_MAP_ENTRY_NULL; + + if (VM_MAP_PAGE_SHIFT(src_map) != PAGE_SHIFT) { + vm_map_offset_t original_start, original_offset, original_end; + + assert(VM_MAP_COPY_PAGE_MASK(copy) == PAGE_MASK); + + /* adjust alignment of first copy_entry's "vme_start" */ + tmp_entry = vm_map_copy_first_entry(copy); + if (tmp_entry != vm_map_copy_to_entry(copy)) { + vm_map_offset_t adjustment; + + original_start = tmp_entry->vme_start; + original_offset = VME_OFFSET(tmp_entry); + + /* map-align the start of the first copy entry... */ + adjustment = (tmp_entry->vme_start - + vm_map_trunc_page( + tmp_entry->vme_start, + VM_MAP_PAGE_MASK(src_map))); + tmp_entry->vme_start -= adjustment; + VME_OFFSET_SET(tmp_entry, + VME_OFFSET(tmp_entry) - adjustment); + copy_addr -= adjustment; + assert(tmp_entry->vme_start < tmp_entry->vme_end); + /* ... adjust for mis-aligned start of copy range */ + adjustment = + (vm_map_trunc_page(copy->offset, + PAGE_MASK) - + vm_map_trunc_page(copy->offset, + VM_MAP_PAGE_MASK(src_map))); + if (adjustment) { + assert(page_aligned(adjustment)); + assert(adjustment < VM_MAP_PAGE_SIZE(src_map)); + tmp_entry->vme_start += adjustment; + VME_OFFSET_SET(tmp_entry, + (VME_OFFSET(tmp_entry) + + adjustment)); + copy_addr += adjustment; + assert(tmp_entry->vme_start < tmp_entry->vme_end); + } + + /* + * Assert that the adjustments haven't exposed + * more than was originally copied... + */ + assert(tmp_entry->vme_start >= original_start); + assert(VME_OFFSET(tmp_entry) >= original_offset); + /* + * ... and that it did not adjust outside of a + * a single 16K page. + */ + assert(vm_map_trunc_page(tmp_entry->vme_start, + VM_MAP_PAGE_MASK(src_map)) == + vm_map_trunc_page(original_start, + VM_MAP_PAGE_MASK(src_map))); + } + + /* adjust alignment of last copy_entry's "vme_end" */ + tmp_entry = vm_map_copy_last_entry(copy); + if (tmp_entry != vm_map_copy_to_entry(copy)) { + vm_map_offset_t adjustment; + + original_end = tmp_entry->vme_end; + + /* map-align the end of the last copy entry... */ + tmp_entry->vme_end = + vm_map_round_page(tmp_entry->vme_end, + VM_MAP_PAGE_MASK(src_map)); + /* ... adjust for mis-aligned end of copy range */ + adjustment = + (vm_map_round_page((copy->offset + + copy->size), + VM_MAP_PAGE_MASK(src_map)) - + vm_map_round_page((copy->offset + + copy->size), + PAGE_MASK)); + if (adjustment) { + assert(page_aligned(adjustment)); + assert(adjustment < VM_MAP_PAGE_SIZE(src_map)); + tmp_entry->vme_end -= adjustment; + assert(tmp_entry->vme_start < tmp_entry->vme_end); + } + + /* + * Assert that the adjustments haven't exposed + * more than was originally copied... + */ + assert(tmp_entry->vme_end <= original_end); + /* + * ... and that it did not adjust outside of a + * a single 16K page. + */ + assert(vm_map_round_page(tmp_entry->vme_end, + VM_MAP_PAGE_MASK(src_map)) == + vm_map_round_page(original_end, + VM_MAP_PAGE_MASK(src_map))); + } + } /* Fix-up start and end points in copy. 
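
If the source map's page size is larger than the system page size, the first copy entry is pulled down to the source map's page boundary, and its object offset must move by exactly the same amount so the entry still names the same bytes. The adjustment is sketched below with an illustrative entry structure.

/* Sketch only: map-aligning the first copy entry. */
#include <stdint.h>
#include <assert.h>

struct copy_entry_sketch {
    uint64_t vme_start, vme_end;
    uint64_t offset;                     /* offset into the backing object */
};

static void
align_first_entry(struct copy_entry_sketch *e, uint64_t map_page_mask)
{
    uint64_t aligned = e->vme_start & ~map_page_mask;   /* trunc to map page */
    uint64_t delta = e->vme_start - aligned;

    e->vme_start -= delta;
    e->offset -= delta;                  /* keep start and offset in lock step */
    assert(e->vme_start < e->vme_end);
}
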
This is necessary */ /* when the various entries in the copy object were picked */ /* up from different sub-maps */ tmp_entry = vm_map_copy_first_entry(copy); + copy_size = 0; /* compute actual size */ while (tmp_entry != vm_map_copy_to_entry(copy)) { - tmp_entry->vme_end = copy_addr + - (tmp_entry->vme_end - tmp_entry->vme_start); + assert(VM_MAP_PAGE_ALIGNED( + copy_addr + (tmp_entry->vme_end - + tmp_entry->vme_start), + VM_MAP_COPY_PAGE_MASK(copy))); + assert(VM_MAP_PAGE_ALIGNED( + copy_addr, + VM_MAP_COPY_PAGE_MASK(copy))); + + /* + * The copy_entries will be injected directly into the + * destination map and might not be "map aligned" there... + */ + tmp_entry->map_aligned = FALSE; + + tmp_entry->vme_end = copy_addr + + (tmp_entry->vme_end - tmp_entry->vme_start); tmp_entry->vme_start = copy_addr; + assert(tmp_entry->vme_start < tmp_entry->vme_end); copy_addr += tmp_entry->vme_end - tmp_entry->vme_start; + copy_size += tmp_entry->vme_end - tmp_entry->vme_start; tmp_entry = (struct vm_map_entry *)tmp_entry->vme_next; } + if (VM_MAP_PAGE_SHIFT(src_map) != PAGE_SHIFT && + copy_size < copy->size) { + /* + * The actual size of the VM map copy is smaller than what + * was requested by the caller. This must be because some + * PAGE_SIZE-sized pages are missing at the end of the last + * VM_MAP_PAGE_SIZE(src_map)-sized chunk of the range. + * The caller might not have been aware of those missing + * pages and might not want to be aware of it, which is + * fine as long as they don't try to access (and crash on) + * those missing pages. + * Let's adjust the size of the "copy", to avoid failing + * in vm_map_copyout() or vm_map_copy_overwrite(). + */ + assert(vm_map_round_page(copy_size, + VM_MAP_PAGE_MASK(src_map)) == + vm_map_round_page(copy->size, + VM_MAP_PAGE_MASK(src_map))); + copy->size = copy_size; + } + *copy_result = copy; - return(KERN_SUCCESS); + return KERN_SUCCESS; + +#undef RETURN +} + +kern_return_t +vm_map_copy_extract( + vm_map_t src_map, + vm_map_address_t src_addr, + vm_map_size_t len, + vm_map_copy_t *copy_result, /* OUT */ + vm_prot_t *cur_prot, /* OUT */ + vm_prot_t *max_prot) +{ + vm_map_offset_t src_start, src_end; + vm_map_copy_t copy; + kern_return_t kr; + + /* + * Check for copies of zero bytes. + */ + + if (len == 0) { + *copy_result = VM_MAP_COPY_NULL; + return KERN_SUCCESS; + } + + /* + * Check that the end address doesn't overflow + */ + src_end = src_addr + len; + if (src_end < src_addr) { + return KERN_INVALID_ADDRESS; + } + + /* + * Compute (page aligned) start and end of region + */ + src_start = vm_map_trunc_page(src_addr, PAGE_MASK); + src_end = vm_map_round_page(src_end, PAGE_MASK); + + /* + * Allocate a header element for the list. + * + * Use the start and end in the header to + * remember the endpoints prior to rounding. 
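
Before the copy is returned, its entries are rebased so they describe one contiguous range starting at copy_addr, and their lengths are summed to recompute the copy's actual size (which may be smaller than the request when tail pages are missing on a larger-page source map). The rebasing loop is sketched below over a simple array of spans.

/* Sketch only: rebase spans to a contiguous range and total their length. */
#include <stdint.h>
#include <stddef.h>

struct span {
    uint64_t start, end;
};

static uint64_t
rebase_spans(struct span *spans, size_t n, uint64_t copy_addr)
{
    uint64_t total = 0;
    for (size_t i = 0; i < n; i++) {
        uint64_t len = spans[i].end - spans[i].start;
        spans[i].start = copy_addr;      /* entries become back-to-back */
        spans[i].end = copy_addr + len;
        copy_addr += len;
        total += len;
    }
    return total;                        /* may be smaller than the request */
}
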
+ */ + + copy = vm_map_copy_allocate(); + copy->type = VM_MAP_COPY_ENTRY_LIST; + copy->cpy_hdr.entries_pageable = TRUE; + + vm_map_store_init(©->cpy_hdr); + + copy->offset = 0; + copy->size = len; + + kr = vm_map_remap_extract(src_map, + src_addr, + len, + FALSE, /* copy */ + ©->cpy_hdr, + cur_prot, + max_prot, + VM_INHERIT_SHARE, + TRUE, /* pageable */ + FALSE, /* same_map */ + VM_MAP_KERNEL_FLAGS_NONE); + if (kr != KERN_SUCCESS) { + vm_map_copy_discard(copy); + return kr; + } -#undef RETURN + *copy_result = copy; + return KERN_SUCCESS; } /* @@ -7929,36 +12053,36 @@ vm_map_copyin_common( kern_return_t vm_map_copyin_object( - vm_object_t object, - vm_object_offset_t offset, /* offset of region in object */ - vm_object_size_t size, /* size of region in object */ - vm_map_copy_t *copy_result) /* OUT */ + vm_object_t object, + vm_object_offset_t offset, /* offset of region in object */ + vm_object_size_t size, /* size of region in object */ + vm_map_copy_t *copy_result) /* OUT */ { - vm_map_copy_t copy; /* Resulting copy */ + vm_map_copy_t copy; /* Resulting copy */ /* * We drop the object into a special copy object * that contains the object directly. */ - copy = (vm_map_copy_t) zalloc(vm_map_copy_zone); + copy = vm_map_copy_allocate(); copy->type = VM_MAP_COPY_OBJECT; copy->cpy_object = object; copy->offset = offset; copy->size = size; *copy_result = copy; - return(KERN_SUCCESS); + return KERN_SUCCESS; } static void vm_map_fork_share( - vm_map_t old_map, - vm_map_entry_t old_entry, - vm_map_t new_map) + vm_map_t old_map, + vm_map_entry_t old_entry, + vm_map_t new_map) { - vm_object_t object; - vm_map_entry_t new_entry; + vm_object_t object; + vm_map_entry_t new_entry; /* * New sharing code. New map entry @@ -7969,48 +12093,47 @@ vm_map_fork_share( * or someone else already has one, then * make a new shadow and share it. */ - - object = old_entry->object.vm_object; + + object = VME_OBJECT(old_entry); if (old_entry->is_sub_map) { assert(old_entry->wired_count == 0); #ifndef NO_NESTED_PMAP - if(old_entry->use_pmap) { - kern_return_t result; - - result = pmap_nest(new_map->pmap, - (old_entry->object.sub_map)->pmap, - (addr64_t)old_entry->vme_start, - (addr64_t)old_entry->vme_start, - (uint64_t)(old_entry->vme_end - old_entry->vme_start)); - if(result) + if (old_entry->use_pmap) { + kern_return_t result; + + result = pmap_nest(new_map->pmap, + (VME_SUBMAP(old_entry))->pmap, + (addr64_t)old_entry->vme_start, + (addr64_t)old_entry->vme_start, + (uint64_t)(old_entry->vme_end - old_entry->vme_start)); + if (result) { panic("vm_map_fork_share: pmap_nest failed!"); + } } -#endif /* NO_NESTED_PMAP */ +#endif /* NO_NESTED_PMAP */ } else if (object == VM_OBJECT_NULL) { object = vm_object_allocate((vm_map_size_t)(old_entry->vme_end - - old_entry->vme_start)); - old_entry->offset = 0; - old_entry->object.vm_object = object; - assert(!old_entry->needs_copy); + old_entry->vme_start)); + VME_OFFSET_SET(old_entry, 0); + VME_OBJECT_SET(old_entry, object); + old_entry->use_pmap = TRUE; +// assert(!old_entry->needs_copy); } else if (object->copy_strategy != - MEMORY_OBJECT_COPY_SYMMETRIC) { - + MEMORY_OBJECT_COPY_SYMMETRIC) { /* * We are already using an asymmetric * copy, and therefore we already have * the right object. */ - - assert(! 
old_entry->needs_copy); - } - else if (old_entry->needs_copy || /* case 1 */ - object->shadowed || /* case 2 */ - (!object->true_share && /* case 3 */ - !old_entry->is_shared && - (object->vo_size > - (vm_map_size_t)(old_entry->vme_end - - old_entry->vme_start)))) { - + + assert(!old_entry->needs_copy); + } else if (old_entry->needs_copy || /* case 1 */ + object->shadowed || /* case 2 */ + (!object->true_share && /* case 3 */ + !old_entry->is_shared && + (object->vo_size > + (vm_map_size_t)(old_entry->vme_end - + old_entry->vme_start)))) { /* * We need to create a shadow. * There are three cases here. @@ -8087,11 +12210,10 @@ vm_map_fork_share( * (This is a preemptive version of * case 2.) */ - vm_object_shadow(&old_entry->object.vm_object, - &old_entry->offset, - (vm_map_size_t) (old_entry->vme_end - - old_entry->vme_start)); - + VME_OBJECT_SHADOW(old_entry, + (vm_map_size_t) (old_entry->vme_end - + old_entry->vme_start)); + /* * If we're making a shadow for other than * copy on write reasons, then we have @@ -8100,35 +12222,41 @@ vm_map_fork_share( if (!old_entry->needs_copy && (old_entry->protection & VM_PROT_WRITE)) { - vm_prot_t prot; + vm_prot_t prot; + + assert(!pmap_has_prot_policy(old_entry->protection)); prot = old_entry->protection & ~VM_PROT_WRITE; - if (override_nx(old_map, old_entry->alias) && prot) - prot |= VM_PROT_EXECUTE; + assert(!pmap_has_prot_policy(prot)); + + if (override_nx(old_map, VME_ALIAS(old_entry)) && prot) { + prot |= VM_PROT_EXECUTE; + } + - if (old_map->mapped) { + if (old_map->mapped_in_other_pmaps) { vm_object_pmap_protect( - old_entry->object.vm_object, - old_entry->offset, + VME_OBJECT(old_entry), + VME_OFFSET(old_entry), (old_entry->vme_end - - old_entry->vme_start), + old_entry->vme_start), PMAP_NULL, old_entry->vme_start, prot); } else { pmap_protect(old_map->pmap, - old_entry->vme_start, - old_entry->vme_end, - prot); + old_entry->vme_start, + old_entry->vme_end, + prot); } } - + old_entry->needs_copy = FALSE; - object = old_entry->object.vm_object; + object = VME_OBJECT(old_entry); } - + /* * If object was using a symmetric copy strategy, * change its copy strategy to the default @@ -8137,11 +12265,11 @@ vm_map_fork_share( * norma case. Bump the reference count for the * new entry. */ - - if(old_entry->is_sub_map) { - vm_map_lock(old_entry->object.sub_map); - vm_map_reference(old_entry->object.sub_map); - vm_map_unlock(old_entry->object.sub_map); + + if (old_entry->is_sub_map) { + vm_map_lock(VME_SUBMAP(old_entry)); + vm_map_reference(VME_SUBMAP(old_entry)); + vm_map_unlock(VME_SUBMAP(old_entry)); } else { vm_object_lock(object); vm_object_reference_locked(object); @@ -8150,43 +12278,65 @@ vm_map_fork_share( } vm_object_unlock(object); } - + /* * Clone the entry, using object ref from above. * Mark both entries as shared. */ - - new_entry = vm_map_entry_create(new_map); + + new_entry = vm_map_entry_create(new_map, FALSE); /* Never the kernel + * map or descendants */ vm_map_entry_copy(new_entry, old_entry); old_entry->is_shared = TRUE; new_entry->is_shared = TRUE; - + + /* + * We're dealing with a shared mapping, so the resulting mapping + * should inherit some of the original mapping's accounting settings. + * "iokit_acct" should have been cleared in vm_map_entry_copy(). + * "use_pmap" should stay the same as before (if it hasn't been reset + * to TRUE when we cleared "iokit_acct"). 
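
For a symmetric-copy object, vm_map_fork_share() interposes a shadow object in exactly the three cases enumerated above: a pending copy-on-write, an object that is already shadowed, or an object larger than the entry that is neither truly shared nor mapped shared. The decision is condensed into the predicate below with illustrative parameters.

/* Sketch only: when a shared fork of a symmetric-copy object needs a shadow. */
#include <stdbool.h>
#include <stdint.h>

static bool
needs_shadow(bool entry_needs_copy, bool object_shadowed,
    bool object_true_share, bool entry_is_shared,
    uint64_t object_size, uint64_t entry_size)
{
    if (entry_needs_copy) {              /* case 1: pending copy-on-write */
        return true;
    }
    if (object_shadowed) {               /* case 2: someone already shadows it */
        return true;
    }
    if (!object_true_share &&            /* case 3: object larger than the entry, */
        !entry_is_shared &&              /* so don't share the excess */
        object_size > entry_size) {
        return true;
    }
    return false;
}
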
+ */ + assert(!new_entry->iokit_acct); + + /* + * If old entry's inheritence is VM_INHERIT_NONE, + * the new entry is for corpse fork, remove the + * write permission from the new entry. + */ + if (old_entry->inheritance == VM_INHERIT_NONE) { + new_entry->protection &= ~VM_PROT_WRITE; + new_entry->max_protection &= ~VM_PROT_WRITE; + } + /* * Insert the entry into the new map -- we * know we're inserting at the end of the new * map. */ - - vm_map_store_entry_link(new_map, vm_map_last_entry(new_map), new_entry); - + + vm_map_store_entry_link(new_map, vm_map_last_entry(new_map), new_entry, + VM_MAP_KERNEL_FLAGS_NONE); + /* * Update the physical map */ - + if (old_entry->is_sub_map) { /* Bill Angell pmap support goes here */ } else { pmap_copy(new_map->pmap, old_map->pmap, new_entry->vme_start, - old_entry->vme_end - old_entry->vme_start, - old_entry->vme_start); + old_entry->vme_end - old_entry->vme_start, + old_entry->vme_start); } } static boolean_t vm_map_fork_copy( - vm_map_t old_map, - vm_map_entry_t *old_entry_p, - vm_map_t new_map) + vm_map_t old_map, + vm_map_entry_t *old_entry_p, + vm_map_t new_map, + int vm_map_copyin_flags) { vm_map_entry_t old_entry = *old_entry_p; vm_map_size_t entry_size = old_entry->vme_end - old_entry->vme_start; @@ -8201,7 +12351,9 @@ vm_map_fork_copy( * be accessed, not just whether it's accessible * right now. */ - if (vm_map_copyin_maxprot(old_map, start, entry_size, FALSE, ©) + vm_map_copyin_flags |= VM_MAP_COPYIN_USE_MAXPROT; + if (vm_map_copyin_internal(old_map, start, entry_size, + vm_map_copyin_flags, ©) != KERN_SUCCESS) { /* * The map might have changed while it @@ -8222,24 +12374,24 @@ vm_map_fork_copy( * that INVALID_ADDRESS and * PROTECTION_FAILURE are handled above. */ - + return FALSE; } - + /* * Insert the copy into the new map */ - + vm_map_copy_insert(new_map, last, copy); - + /* * Pick up the traversal at the end of * the copied region. */ - + vm_map_lock(old_map); start += entry_size; - if (! vm_map_lookup_entry(old_map, start, &last)) { + if (!vm_map_lookup_entry(old_map, start, &last)) { last = last->vme_next; } else { if (last->vme_start == start) { @@ -8262,49 +12414,111 @@ vm_map_fork_copy( * * Create and return a new map based on the old * map, according to the inheritance values on the - * regions in that map. + * regions in that map and the options. * * The source map must not be locked. 
*/ vm_map_t vm_map_fork( - vm_map_t old_map) + ledger_t ledger, + vm_map_t old_map, + int options) { - pmap_t new_pmap; - vm_map_t new_map; - vm_map_entry_t old_entry; - vm_map_size_t new_size = 0, entry_size; - vm_map_entry_t new_entry; - boolean_t src_needs_copy; - boolean_t new_entry_needs_copy; + pmap_t new_pmap; + vm_map_t new_map; + vm_map_entry_t old_entry; + vm_map_size_t new_size = 0, entry_size; + vm_map_entry_t new_entry; + boolean_t src_needs_copy; + boolean_t new_entry_needs_copy; + boolean_t pmap_is64bit; + int vm_map_copyin_flags; + vm_inherit_t old_entry_inheritance; + int map_create_options; + kern_return_t footprint_collect_kr; + + if (options & ~(VM_MAP_FORK_SHARE_IF_INHERIT_NONE | + VM_MAP_FORK_PRESERVE_PURGEABLE | + VM_MAP_FORK_CORPSE_FOOTPRINT)) { + /* unsupported option */ + return VM_MAP_NULL; + } - new_pmap = pmap_create((vm_map_size_t) 0, + pmap_is64bit = #if defined(__i386__) || defined(__x86_64__) - old_map->pmap->pm_task_map != TASK_MAP_32BIT + old_map->pmap->pm_task_map != TASK_MAP_32BIT; +#elif defined(__arm64__) + old_map->pmap->max == MACH_VM_MAX_ADDRESS; +#elif defined(__arm__) + FALSE; #else - 0 +#error Unknown architecture. #endif - ); -#if defined(__i386__) - if (old_map->pmap->pm_task_map == TASK_MAP_64BIT_SHARED) - pmap_set_4GB_pagezero(new_pmap); + + unsigned int pmap_flags = 0; + pmap_flags |= pmap_is64bit ? PMAP_CREATE_64BIT : 0; +#if defined(HAS_APPLE_PAC) + pmap_flags |= old_map->pmap->disable_jop ? PMAP_CREATE_DISABLE_JOP : 0; #endif + new_pmap = pmap_create_options(ledger, (vm_map_size_t) 0, pmap_flags); vm_map_reference_swap(old_map); vm_map_lock(old_map); - new_map = vm_map_create(new_pmap, - old_map->min_offset, - old_map->max_offset, - old_map->hdr.entries_pageable); + map_create_options = 0; + if (old_map->hdr.entries_pageable) { + map_create_options |= VM_MAP_CREATE_PAGEABLE; + } + if (options & VM_MAP_FORK_CORPSE_FOOTPRINT) { + map_create_options |= VM_MAP_CREATE_CORPSE_FOOTPRINT; + footprint_collect_kr = KERN_SUCCESS; + } + new_map = vm_map_create_options(new_pmap, + old_map->min_offset, + old_map->max_offset, + map_create_options); + vm_map_lock(new_map); + vm_commit_pagezero_status(new_map); + /* inherit the parent map's page size */ + vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(old_map)); for ( old_entry = vm_map_first_entry(old_map); old_entry != vm_map_to_entry(old_map); ) { - entry_size = old_entry->vme_end - old_entry->vme_start; - switch (old_entry->inheritance) { + old_entry_inheritance = old_entry->inheritance; + /* + * If caller used the VM_MAP_FORK_SHARE_IF_INHERIT_NONE option + * share VM_INHERIT_NONE entries that are not backed by a + * device pager. + */ + if (old_entry_inheritance == VM_INHERIT_NONE && + (options & VM_MAP_FORK_SHARE_IF_INHERIT_NONE) && + !(!old_entry->is_sub_map && + VME_OBJECT(old_entry) != NULL && + VME_OBJECT(old_entry)->pager != NULL && + is_device_pager_ops( + VME_OBJECT(old_entry)->pager->mo_pager_ops))) { + old_entry_inheritance = VM_INHERIT_SHARE; + } + + if (old_entry_inheritance != VM_INHERIT_NONE && + (options & VM_MAP_FORK_CORPSE_FOOTPRINT) && + footprint_collect_kr == KERN_SUCCESS) { + /* + * The corpse won't have old_map->pmap to query + * footprint information, so collect that data now + * and store it in new_map->vmmap_corpse_footprint + * for later autopsy. + */ + footprint_collect_kr = + vm_map_corpse_footprint_collect(old_map, + old_entry, + new_map); + } + + switch (old_entry_inheritance) { case VM_INHERIT_NONE: break; @@ -8321,24 +12535,40 @@ vm_map_fork( * to vm_map_fork_copy. 
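
Each entry is dispatched on its (possibly overridden) inheritance value: VM_INHERIT_NONE entries are skipped, VM_INHERIT_SHARE entries are shared into the child, and VM_INHERIT_COPY entries get the copy-on-write treatment; the share-if-inherit-none option promotes non-device NONE entries to SHARE (used, for example, when building a corpse map). The sketch below condenses that dispatch with illustrative enum names.

/* Sketch only: per-entry inheritance dispatch during fork. */
typedef enum { INHERIT_NONE, INHERIT_SHARE, INHERIT_COPY } inherit_t;
typedef enum { FORK_SKIP, FORK_SHARE, FORK_COPY } fork_action_t;

static fork_action_t
fork_action(inherit_t inh, int share_if_none, int device_backed)
{
    if (inh == INHERIT_NONE && share_if_none && !device_backed) {
        inh = INHERIT_SHARE;             /* promote non-device NONE entries */
    }
    switch (inh) {
    case INHERIT_NONE:
        return FORK_SKIP;                /* entry is absent from the child */
    case INHERIT_SHARE:
        return FORK_SHARE;               /* child maps the same object */
    case INHERIT_COPY:
    default:
        return FORK_COPY;                /* child gets a copy-on-write copy */
    }
}
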
*/ - if(old_entry->is_sub_map) + if (old_entry->is_sub_map) { break; + } if ((old_entry->wired_count != 0) || - ((old_entry->object.vm_object != NULL) && - (old_entry->object.vm_object->true_share))) { + ((VME_OBJECT(old_entry) != NULL) && + (VME_OBJECT(old_entry)->true_share))) { goto slow_vm_map_fork_copy; } - new_entry = vm_map_entry_create(new_map); + new_entry = vm_map_entry_create(new_map, FALSE); /* never the kernel map or descendants */ vm_map_entry_copy(new_entry, old_entry); - /* clear address space specifics */ - new_entry->use_pmap = FALSE; + if (new_entry->is_sub_map) { + /* clear address space specifics */ + new_entry->use_pmap = FALSE; + } else { + /* + * We're dealing with a copy-on-write operation, + * so the resulting mapping should not inherit + * the original mapping's accounting settings. + * "iokit_acct" should have been cleared in + * vm_map_entry_copy(). + * "use_pmap" should be reset to its default + * (TRUE) so that the new mapping gets + * accounted for in the task's memory footprint. + */ + assert(!new_entry->iokit_acct); + new_entry->use_pmap = TRUE; + } - if (! vm_object_copy_quickly( - &new_entry->object.vm_object, - old_entry->offset, + if (!vm_object_copy_quickly( + VME_OBJECT_PTR(new_entry), + VME_OFFSET(old_entry), (old_entry->vme_end - - old_entry->vme_start), + old_entry->vme_start), &src_needs_copy, &new_entry_needs_copy)) { vm_map_entry_dispose(new_map, new_entry); @@ -8348,43 +12578,60 @@ vm_map_fork( /* * Handle copy-on-write obligations */ - + if (src_needs_copy && !old_entry->needs_copy) { - vm_prot_t prot; + vm_prot_t prot; + + assert(!pmap_has_prot_policy(old_entry->protection)); prot = old_entry->protection & ~VM_PROT_WRITE; - if (override_nx(old_map, old_entry->alias) && prot) - prot |= VM_PROT_EXECUTE; + if (override_nx(old_map, VME_ALIAS(old_entry)) + && prot) { + prot |= VM_PROT_EXECUTE; + } + + assert(!pmap_has_prot_policy(prot)); vm_object_pmap_protect( - old_entry->object.vm_object, - old_entry->offset, + VME_OBJECT(old_entry), + VME_OFFSET(old_entry), (old_entry->vme_end - - old_entry->vme_start), - ((old_entry->is_shared - || old_map->mapped) - ? PMAP_NULL : - old_map->pmap), + old_entry->vme_start), + ((old_entry->is_shared + || old_map->mapped_in_other_pmaps) + ? PMAP_NULL : + old_map->pmap), old_entry->vme_start, prot); + assert(old_entry->wired_count == 0); old_entry->needs_copy = TRUE; } new_entry->needs_copy = new_entry_needs_copy; - + /* * Insert the entry at the end * of the map. 
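/*
 * Illustrative aside (not part of the diff): the protection demotion applied
 * above when vm_object_copy_quickly() succeeds but the source still needs a
 * copy.  Both parent and child keep the range mapped without write access so
 * the first store from either side takes a copy-on-write fault.  Constants
 * mirror <mach/vm_prot.h>; the helper name is made up for this sketch.
 */
#include <assert.h>

typedef unsigned int vm_prot_t;
#define VM_PROT_READ    0x1
#define VM_PROT_WRITE   0x2
#define VM_PROT_EXECUTE 0x4

static vm_prot_t
cow_demote(vm_prot_t cur, int allow_exec_override)
{
	vm_prot_t prot = cur & ~VM_PROT_WRITE;  /* drop write access */

	/* override_nx() analogue: re-grant execute for legacy regions */
	if (allow_exec_override && prot != 0) {
		prot |= VM_PROT_EXECUTE;
	}
	return prot;
}

int
main(void)
{
	assert(cow_demote(VM_PROT_READ | VM_PROT_WRITE, 0) == VM_PROT_READ);
	assert(cow_demote(VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, 0)
	    == (VM_PROT_READ | VM_PROT_EXECUTE));
	return 0;
}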
*/ - - vm_map_store_entry_link(new_map, vm_map_last_entry(new_map), - new_entry); + + vm_map_store_entry_link(new_map, + vm_map_last_entry(new_map), + new_entry, + VM_MAP_KERNEL_FLAGS_NONE); new_size += entry_size; break; - slow_vm_map_fork_copy: - if (vm_map_fork_copy(old_map, &old_entry, new_map)) { +slow_vm_map_fork_copy: + vm_map_copyin_flags = 0; + if (options & VM_MAP_FORK_PRESERVE_PURGEABLE) { + vm_map_copyin_flags |= + VM_MAP_COPYIN_PRESERVE_PURGEABLE; + } + if (vm_map_fork_copy(old_map, + &old_entry, + new_map, + vm_map_copyin_flags)) { new_size += entry_size; } continue; @@ -8392,35 +12639,57 @@ vm_map_fork( old_entry = old_entry->vme_next; } +#if defined(__arm64__) + pmap_insert_sharedpage(new_map->pmap); +#endif + new_map->size = new_size; + + if (options & VM_MAP_FORK_CORPSE_FOOTPRINT) { + vm_map_corpse_footprint_collect_done(new_map); + } + + vm_map_unlock(new_map); vm_map_unlock(old_map); vm_map_deallocate(old_map); - return(new_map); + return new_map; } /* * vm_map_exec: * - * Setup the "new_map" with the proper execution environment according + * Setup the "new_map" with the proper execution environment according * to the type of executable (platform, 64bit, chroot environment). * Map the comm page and shared region, etc... */ kern_return_t vm_map_exec( - vm_map_t new_map, - task_t task, - void *fsroot, - cpu_type_t cpu) + vm_map_t new_map, + task_t task, + boolean_t is64bit, + void *fsroot, + cpu_type_t cpu, + cpu_subtype_t cpu_subtype) { SHARED_REGION_TRACE_DEBUG( - ("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x): ->\n", - current_task(), new_map, task, fsroot, cpu)); - (void) vm_commpage_enter(new_map, task); - (void) vm_shared_region_enter(new_map, task, fsroot, cpu); + ("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x,0x%x): ->\n", + (void *)VM_KERNEL_ADDRPERM(current_task()), + (void *)VM_KERNEL_ADDRPERM(new_map), + (void *)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(fsroot), + cpu, + cpu_subtype)); + (void) vm_commpage_enter(new_map, task, is64bit); + (void) vm_shared_region_enter(new_map, task, is64bit, fsroot, cpu, cpu_subtype); SHARED_REGION_TRACE_DEBUG( - ("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x): <-\n", - current_task(), new_map, task, fsroot, cpu)); + ("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x,0x%x): <-\n", + (void *)VM_KERNEL_ADDRPERM(current_task()), + (void *)VM_KERNEL_ADDRPERM(new_map), + (void *)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(fsroot), + cpu, + cpu_subtype)); return KERN_SUCCESS; } @@ -8450,28 +12719,29 @@ vm_map_exec( */ kern_return_t vm_map_lookup_locked( - vm_map_t *var_map, /* IN/OUT */ - vm_map_offset_t vaddr, - vm_prot_t fault_type, - int object_lock_type, - vm_map_version_t *out_version, /* OUT */ - vm_object_t *object, /* OUT */ - vm_object_offset_t *offset, /* OUT */ - vm_prot_t *out_prot, /* OUT */ - boolean_t *wired, /* OUT */ - vm_object_fault_info_t fault_info, /* OUT */ - vm_map_t *real_map) -{ - vm_map_entry_t entry; - register vm_map_t map = *var_map; - vm_map_t old_map = *var_map; - vm_map_t cow_sub_map_parent = VM_MAP_NULL; - vm_map_offset_t cow_parent_vaddr = 0; - vm_map_offset_t old_start = 0; - vm_map_offset_t old_end = 0; - register vm_prot_t prot; - boolean_t mask_protections; - vm_prot_t original_fault_type; + vm_map_t *var_map, /* IN/OUT */ + vm_map_offset_t vaddr, + vm_prot_t fault_type, + int object_lock_type, + vm_map_version_t *out_version, /* OUT */ + vm_object_t *object, /* OUT */ + vm_object_offset_t *offset, /* OUT */ + vm_prot_t *out_prot, /* OUT */ + boolean_t 
*wired, /* OUT */ + vm_object_fault_info_t fault_info, /* OUT */ + vm_map_t *real_map) +{ + vm_map_entry_t entry; + vm_map_t map = *var_map; + vm_map_t old_map = *var_map; + vm_map_t cow_sub_map_parent = VM_MAP_NULL; + vm_map_offset_t cow_parent_vaddr = 0; + vm_map_offset_t old_start = 0; + vm_map_offset_t old_end = 0; + vm_prot_t prot; + boolean_t mask_protections; + boolean_t force_copy; + vm_prot_t original_fault_type; /* * VM_PROT_MASK means that the caller wants us to use "fault_type" @@ -8479,7 +12749,8 @@ vm_map_lookup_locked( * absolute value. */ mask_protections = (fault_type & VM_PROT_IS_MASK) ? TRUE : FALSE; - fault_type &= ~VM_PROT_IS_MASK; + force_copy = (fault_type & VM_PROT_COPY) ? TRUE : FALSE; + fault_type &= VM_PROT_ALL; original_fault_type = fault_type; *real_map = map; @@ -8495,24 +12766,26 @@ RetryLookup: if ((entry == vm_map_to_entry(map)) || (vaddr < entry->vme_start) || (vaddr >= entry->vme_end)) { - vm_map_entry_t tmp_entry; + vm_map_entry_t tmp_entry; /* * Entry was either not a valid hint, or the vaddr * was not contained in the entry, so do a full lookup. */ if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) { - if((cow_sub_map_parent) && (cow_sub_map_parent != map)) + if ((cow_sub_map_parent) && (cow_sub_map_parent != map)) { vm_map_unlock(cow_sub_map_parent); - if((*real_map != map) - && (*real_map != cow_sub_map_parent)) + } + if ((*real_map != map) + && (*real_map != cow_sub_map_parent)) { vm_map_unlock(*real_map); + } return KERN_INVALID_ADDRESS; } entry = tmp_entry; } - if(map == old_map) { + if (map == old_map) { old_start = entry->vme_start; old_end = entry->vme_end; } @@ -8524,32 +12797,39 @@ RetryLookup: submap_recurse: if (entry->is_sub_map) { - vm_map_offset_t local_vaddr; - vm_map_offset_t end_delta; - vm_map_offset_t start_delta; - vm_map_entry_t submap_entry; - boolean_t mapped_needs_copy=FALSE; + vm_map_offset_t local_vaddr; + vm_map_offset_t end_delta; + vm_map_offset_t start_delta; + vm_map_entry_t submap_entry; + vm_prot_t subentry_protection; + vm_prot_t subentry_max_protection; + boolean_t subentry_no_copy_on_read; + boolean_t mapped_needs_copy = FALSE; local_vaddr = vaddr; - if ((entry->use_pmap && !(fault_type & VM_PROT_WRITE))) { + if ((entry->use_pmap && + !((fault_type & VM_PROT_WRITE) || + force_copy))) { /* if real_map equals map we unlock below */ - if ((*real_map != map) && - (*real_map != cow_sub_map_parent)) + if ((*real_map != map) && + (*real_map != cow_sub_map_parent)) { vm_map_unlock(*real_map); - *real_map = entry->object.sub_map; + } + *real_map = VME_SUBMAP(entry); } - if(entry->needs_copy && (fault_type & VM_PROT_WRITE)) { + if (entry->needs_copy && + ((fault_type & VM_PROT_WRITE) || + force_copy)) { if (!mapped_needs_copy) { if (vm_map_lock_read_to_write(map)) { vm_map_lock_read(map); - /* XXX FBDP: entry still valid ? 
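/*
 * Illustrative aside (not part of the diff): how vm_map_lookup_locked() peels
 * the modifier bits off the fault type above.  VM_PROT_IS_MASK and
 * VM_PROT_COPY ride in on fault_type but are not access rights, so they are
 * latched into local booleans and then stripped, leaving only read/write/
 * execute for the protection check.  Constant values mirror <mach/vm_prot.h>.
 */
#include <stdio.h>

typedef unsigned int vm_prot_t;
#define VM_PROT_READ    0x01
#define VM_PROT_WRITE   0x02
#define VM_PROT_EXECUTE 0x04
#define VM_PROT_ALL     (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)
#define VM_PROT_COPY    0x10
#define VM_PROT_IS_MASK 0x40

int
main(void)
{
	vm_prot_t fault_type = VM_PROT_READ | VM_PROT_COPY;
	int mask_protections = (fault_type & VM_PROT_IS_MASK) ? 1 : 0;
	int force_copy = (fault_type & VM_PROT_COPY) ? 1 : 0;

	fault_type &= VM_PROT_ALL;  /* only the access bits survive */

	printf("mask=%d force_copy=%d fault_type=0x%x\n",
	    mask_protections, force_copy, fault_type);
	return 0;
}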
*/ - if(*real_map == entry->object.sub_map) - *real_map = map; + *real_map = map; goto RetryLookup; } - vm_map_lock_read(entry->object.sub_map); + vm_map_lock_read(VME_SUBMAP(entry)); + *var_map = VME_SUBMAP(entry); cow_sub_map_parent = map; /* reset base to map before cow object */ /* this is the map which will accept */ @@ -8559,35 +12839,38 @@ submap_recurse: cow_parent_vaddr = vaddr; mapped_needs_copy = TRUE; } else { - vm_map_lock_read(entry->object.sub_map); - if((cow_sub_map_parent != map) && - (*real_map != map)) + vm_map_lock_read(VME_SUBMAP(entry)); + *var_map = VME_SUBMAP(entry); + if ((cow_sub_map_parent != map) && + (*real_map != map)) { vm_map_unlock(map); + } } } else { - vm_map_lock_read(entry->object.sub_map); + vm_map_lock_read(VME_SUBMAP(entry)); + *var_map = VME_SUBMAP(entry); /* leave map locked if it is a target */ /* cow sub_map above otherwise, just */ /* follow the maps down to the object */ /* here we unlock knowing we are not */ /* revisiting the map. */ - if((*real_map != map) && (map != cow_sub_map_parent)) + if ((*real_map != map) && (map != cow_sub_map_parent)) { vm_map_unlock_read(map); + } } - /* XXX FBDP: map has been unlocked, what protects "entry" !? */ - *var_map = map = entry->object.sub_map; + map = *var_map; /* calculate the offset in the submap for vaddr */ - local_vaddr = (local_vaddr - entry->vme_start) + entry->offset; + local_vaddr = (local_vaddr - entry->vme_start) + VME_OFFSET(entry); - RetrySubMap: - if(!vm_map_lookup_entry(map, local_vaddr, &submap_entry)) { - if((cow_sub_map_parent) && (cow_sub_map_parent != map)){ +RetrySubMap: + if (!vm_map_lookup_entry(map, local_vaddr, &submap_entry)) { + if ((cow_sub_map_parent) && (cow_sub_map_parent != map)) { vm_map_unlock(cow_sub_map_parent); } - if((*real_map != map) - && (*real_map != cow_sub_map_parent)) { + if ((*real_map != map) + && (*real_map != cow_sub_map_parent)) { vm_map_unlock(*real_map); } *real_map = map; @@ -8604,32 +12887,33 @@ submap_recurse: /* ultimately be clipped in the top map will only need */ /* to be as big as the portion of the underlying entry */ /* which is mapped */ - start_delta = submap_entry->vme_start > entry->offset ? - submap_entry->vme_start - entry->offset : 0; + start_delta = submap_entry->vme_start > VME_OFFSET(entry) ? + submap_entry->vme_start - VME_OFFSET(entry) : 0; - end_delta = - (entry->offset + start_delta + (old_end - old_start)) <= - submap_entry->vme_end ? - 0 : (entry->offset + - (old_end - old_start)) - - submap_entry->vme_end; + end_delta = + (VME_OFFSET(entry) + start_delta + (old_end - old_start)) <= + submap_entry->vme_end ? 
+ 0 : (VME_OFFSET(entry) + + (old_end - old_start)) + - submap_entry->vme_end; old_start += start_delta; old_end -= end_delta; - if(submap_entry->is_sub_map) { + if (submap_entry->is_sub_map) { entry = submap_entry; vaddr = local_vaddr; goto submap_recurse; } - if(((fault_type & VM_PROT_WRITE) && cow_sub_map_parent)) { - - vm_object_t sub_object, copy_object; + if (((fault_type & VM_PROT_WRITE) || + force_copy) + && cow_sub_map_parent) { + vm_object_t sub_object, copy_object; vm_object_offset_t copy_offset; - vm_map_offset_t local_start; - vm_map_offset_t local_end; - boolean_t copied_slowly = FALSE; + vm_map_offset_t local_start; + vm_map_offset_t local_end; + boolean_t copied_slowly = FALSE; if (vm_map_lock_read_to_write(map)) { vm_map_lock_read(map); @@ -8639,24 +12923,28 @@ submap_recurse: } - sub_object = submap_entry->object.vm_object; + sub_object = VME_OBJECT(submap_entry); if (sub_object == VM_OBJECT_NULL) { sub_object = - vm_object_allocate( - (vm_map_size_t) - (submap_entry->vme_end - - submap_entry->vme_start)); - submap_entry->object.vm_object = sub_object; - submap_entry->offset = 0; - } - local_start = local_vaddr - - (cow_parent_vaddr - old_start); - local_end = local_vaddr + - (old_end - cow_parent_vaddr); + vm_object_allocate( + (vm_map_size_t) + (submap_entry->vme_end - + submap_entry->vme_start)); + VME_OBJECT_SET(submap_entry, sub_object); + VME_OFFSET_SET(submap_entry, 0); + assert(!submap_entry->is_sub_map); + assert(submap_entry->use_pmap); + } + local_start = local_vaddr - + (cow_parent_vaddr - old_start); + local_end = local_vaddr + + (old_end - cow_parent_vaddr); vm_map_clip_start(map, submap_entry, local_start); vm_map_clip_end(map, submap_entry, local_end); - /* unnesting was done in vm_map_clip_start/end() */ - assert(!submap_entry->use_pmap); + if (submap_entry->is_sub_map) { + /* unnesting was done when clipping */ + assert(!submap_entry->use_pmap); + } /* This is the COW case, lets connect */ /* an entry in our space to the underlying */ @@ -8664,48 +12952,57 @@ submap_recurse: /* submap. */ - if(submap_entry->wired_count != 0 || - (sub_object->copy_strategy == + if (submap_entry->wired_count != 0 || + (sub_object->copy_strategy == MEMORY_OBJECT_COPY_NONE)) { vm_object_lock(sub_object); vm_object_copy_slowly(sub_object, - submap_entry->offset, - (submap_entry->vme_end - - submap_entry->vme_start), - FALSE, - ©_object); + VME_OFFSET(submap_entry), + (submap_entry->vme_end - + submap_entry->vme_start), + FALSE, + ©_object); copied_slowly = TRUE; } else { - /* set up shadow object */ copy_object = sub_object; - vm_object_reference(copy_object); + vm_object_lock(sub_object); + vm_object_reference_locked(sub_object); sub_object->shadowed = TRUE; + vm_object_unlock(sub_object); + + assert(submap_entry->wired_count == 0); submap_entry->needs_copy = TRUE; - prot = submap_entry->protection & ~VM_PROT_WRITE; + prot = submap_entry->protection; + assert(!pmap_has_prot_policy(prot)); + prot = prot & ~VM_PROT_WRITE; + assert(!pmap_has_prot_policy(prot)); - if (override_nx(map, submap_entry->alias) && prot) - prot |= VM_PROT_EXECUTE; + if (override_nx(old_map, + VME_ALIAS(submap_entry)) + && prot) { + prot |= VM_PROT_EXECUTE; + } vm_object_pmap_protect( sub_object, - submap_entry->offset, - submap_entry->vme_end - + VME_OFFSET(submap_entry), + submap_entry->vme_end - submap_entry->vme_start, - (submap_entry->is_shared - || map->mapped) ? + (submap_entry->is_shared + || map->mapped_in_other_pmaps) ? 
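/*
 * Illustrative aside (not part of the diff): the start_delta/end_delta
 * arithmetic above, with concrete numbers.  A top-level entry covering
 * [0x1000, 0x5000) maps a submap at offset 0; the submap entry found there
 * only covers [0x1000, 0x3000) of the submap, so the lookup window is clipped
 * to the top-level addresses [0x2000, 0x4000) it actually backs.  Standalone
 * user-space arithmetic only.
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t old_start = 0x1000, old_end = 0x5000; /* top-level range */
	uint64_t entry_offset = 0;                     /* VME_OFFSET(entry) */
	uint64_t sub_start = 0x1000, sub_end = 0x3000; /* submap entry */

	uint64_t start_delta = sub_start > entry_offset ?
	    sub_start - entry_offset : 0;
	uint64_t end_delta =
	    (entry_offset + start_delta + (old_end - old_start)) <= sub_end ?
	    0 : (entry_offset + (old_end - old_start)) - sub_end;

	old_start += start_delta;   /* clip the front of the window */
	old_end -= end_delta;       /* clip the back of the window */

	assert(old_start == 0x2000 && old_end == 0x4000);
	return 0;
}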
PMAP_NULL : map->pmap, submap_entry->vme_start, prot); } - + /* * Adjust the fault offset to the submap entry. */ copy_offset = (local_vaddr - - submap_entry->vme_start + - submap_entry->offset); + submap_entry->vme_start + + VME_OFFSET(submap_entry)); /* This works diffently than the */ /* normal submap case. We go back */ @@ -8714,7 +13011,12 @@ submap_recurse: /* the sub_map, substituting the */ /* new copy object, */ + subentry_protection = submap_entry->protection; + subentry_max_protection = submap_entry->max_protection; + subentry_no_copy_on_read = submap_entry->vme_no_copy_on_read; vm_map_unlock(map); + submap_entry = NULL; /* not valid after map unlock */ + local_start = old_start; local_end = old_end; map = cow_sub_map_parent; @@ -8722,14 +13024,14 @@ submap_recurse: vaddr = cow_parent_vaddr; cow_sub_map_parent = NULL; - if(!vm_map_lookup_entry(map, - vaddr, &entry)) { + if (!vm_map_lookup_entry(map, + vaddr, &entry)) { vm_object_deallocate( copy_object); vm_map_lock_write_to_read(map); return KERN_INVALID_ADDRESS; } - + /* clip out the portion of space */ /* mapped by the sub map which */ /* corresponds to the underlying */ @@ -8759,46 +13061,89 @@ submap_recurse: vm_map_clip_start(map, entry, local_start); vm_map_clip_end(map, entry, local_end); - /* unnesting was done in vm_map_clip_start/end() */ - assert(!entry->use_pmap); + if (entry->is_sub_map) { + /* unnesting was done when clipping */ + assert(!entry->use_pmap); + } /* substitute copy object for */ /* shared map entry */ - vm_map_deallocate(entry->object.sub_map); + vm_map_deallocate(VME_SUBMAP(entry)); + assert(!entry->iokit_acct); entry->is_sub_map = FALSE; - entry->object.vm_object = copy_object; + entry->use_pmap = TRUE; + VME_OBJECT_SET(entry, copy_object); /* propagate the submap entry's protections */ - entry->protection |= submap_entry->protection; - entry->max_protection |= submap_entry->max_protection; + if (entry->protection != VM_PROT_READ) { + /* + * Someone has already altered the top entry's + * protections via vm_protect(VM_PROT_COPY). + * Respect these new values and ignore the + * submap entry's protections. + */ + } else { + /* + * Regular copy-on-write: propagate the submap + * entry's protections to the top map entry. + */ + entry->protection |= subentry_protection; + } + entry->max_protection |= subentry_max_protection; + /* propagate no_copy_on_read */ + entry->vme_no_copy_on_read = subentry_no_copy_on_read; + + if ((entry->protection & VM_PROT_WRITE) && + (entry->protection & VM_PROT_EXECUTE) && +#if !CONFIG_EMBEDDED + map != kernel_map && + cs_process_enforcement(NULL) && +#endif /* !CONFIG_EMBEDDED */ + !(entry->used_for_jit)) { + DTRACE_VM3(cs_wx, + uint64_t, (uint64_t)entry->vme_start, + uint64_t, (uint64_t)entry->vme_end, + vm_prot_t, entry->protection); + printf("CODE SIGNING: %d[%s] %s can't have both write and exec at the same time\n", + proc_selfpid(), + (current_task()->bsd_info + ? 
proc_name_address(current_task()->bsd_info) + : "?"), + __FUNCTION__); + entry->protection &= ~VM_PROT_EXECUTE; + } - if(copied_slowly) { - entry->offset = local_start - old_start; + if (copied_slowly) { + VME_OFFSET_SET(entry, local_start - old_start); entry->needs_copy = FALSE; entry->is_shared = FALSE; } else { - entry->offset = copy_offset; + VME_OFFSET_SET(entry, copy_offset); + assert(entry->wired_count == 0); entry->needs_copy = TRUE; - if(entry->inheritance == VM_INHERIT_SHARE) + if (entry->inheritance == VM_INHERIT_SHARE) { entry->inheritance = VM_INHERIT_COPY; - if (map != old_map) + } + if (map != old_map) { entry->is_shared = TRUE; + } } - if(entry->inheritance == VM_INHERIT_SHARE) + if (entry->inheritance == VM_INHERIT_SHARE) { entry->inheritance = VM_INHERIT_COPY; + } vm_map_lock_write_to_read(map); } else { - if((cow_sub_map_parent) - && (cow_sub_map_parent != *real_map) - && (cow_sub_map_parent != map)) { + if ((cow_sub_map_parent) + && (cow_sub_map_parent != *real_map) + && (cow_sub_map_parent != map)) { vm_map_unlock(cow_sub_map_parent); } entry = submap_entry; vaddr = local_vaddr; } } - + /* * Check whether this task is allowed to have * this page. @@ -8806,11 +13151,11 @@ submap_recurse: prot = entry->protection; - if (override_nx(map, entry->alias) && prot) { - /* + if (override_nx(old_map, VME_ALIAS(entry)) && prot) { + /* * HACK -- if not a stack, then allow execution */ - prot |= VM_PROT_EXECUTE; + prot |= VM_PROT_EXECUTE; } if (mask_protections) { @@ -8819,15 +13164,21 @@ submap_recurse: goto protection_failure; } } - if ((fault_type & (prot)) != fault_type) { - protection_failure: + if (((fault_type & prot) != fault_type) +#if __arm64__ + /* prefetch abort in execute-only page */ + && !(prot == VM_PROT_EXECUTE && fault_type == (VM_PROT_READ | VM_PROT_EXECUTE)) +#endif + ) { +protection_failure: if (*real_map != map) { vm_map_unlock(*real_map); } *real_map = map; - if ((fault_type & VM_PROT_EXECUTE) && prot) - log_stack_execution_failure((addr64_t)vaddr, prot); + if ((fault_type & VM_PROT_EXECUTE) && prot) { + log_stack_execution_failure((addr64_t)vaddr, prot); + } DTRACE_VM2(prot_fault, int, 1, (uint64_t *), NULL); return KERN_PROTECTION_FAILURE; @@ -8839,15 +13190,16 @@ submap_recurse: */ *wired = (entry->wired_count != 0); - if (*wired) - fault_type = prot; + if (*wired) { + fault_type = prot; + } /* * If the entry was copy-on-write, we either ... */ if (entry->needs_copy) { - /* + /* * If we want to write the page, we may as well * handle that now since we've got the map locked. * @@ -8855,7 +13207,7 @@ submap_recurse: * demote the permissions allowed. */ - if ((fault_type & VM_PROT_WRITE) || *wired) { + if ((fault_type & VM_PROT_WRITE) || *wired || force_copy) { /* * Make a new object, and place it in the * object chain. Note that no new references @@ -8867,16 +13219,20 @@ submap_recurse: vm_map_lock_read(map); goto RetryLookup; } - vm_object_shadow(&entry->object.vm_object, - &entry->offset, - (vm_map_size_t) (entry->vme_end - - entry->vme_start)); - entry->object.vm_object->shadowed = TRUE; + if (VME_OBJECT(entry)->shadowed == FALSE) { + vm_object_lock(VME_OBJECT(entry)); + VME_OBJECT(entry)->shadowed = TRUE; + vm_object_unlock(VME_OBJECT(entry)); + } + VME_OBJECT_SHADOW(entry, + (vm_map_size_t) (entry->vme_end - + entry->vme_start)); entry->needs_copy = FALSE; + vm_map_lock_write_to_read(map); } - else { + if ((fault_type & VM_PROT_WRITE) == 0 && *wired == 0) { /* * We're attempting to read a copy-on-write * page -- don't allow writes. 
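/*
 * Illustrative aside (not part of the diff): the W^X rule enforced above when
 * the submap entry's protections are folded into the parent entry.  If a
 * mapping would end up both writable and executable and is not a JIT region,
 * execute is stripped.  The helper below is a made-up stand-in for that
 * check, using <mach/vm_prot.h>-style constants.
 */
#include <assert.h>
#include <stdbool.h>

typedef unsigned int vm_prot_t;
#define VM_PROT_READ    0x1
#define VM_PROT_WRITE   0x2
#define VM_PROT_EXECUTE 0x4

static vm_prot_t
enforce_wx(vm_prot_t prot, bool used_for_jit)
{
	if ((prot & VM_PROT_WRITE) && (prot & VM_PROT_EXECUTE) &&
	    !used_for_jit) {
		prot &= ~VM_PROT_EXECUTE;  /* can't have write and exec together */
	}
	return prot;
}

int
main(void)
{
	assert(enforce_wx(VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, false)
	    == (VM_PROT_READ | VM_PROT_WRITE));
	assert(enforce_wx(VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, true)
	    == (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE));
	return 0;
}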
@@ -8889,16 +13245,18 @@ submap_recurse: /* * Create an object if necessary. */ - if (entry->object.vm_object == VM_OBJECT_NULL) { - + if (VME_OBJECT(entry) == VM_OBJECT_NULL) { if (vm_map_lock_read_to_write(map)) { vm_map_lock_read(map); goto RetryLookup; } - entry->object.vm_object = vm_object_allocate( - (vm_map_size_t)(entry->vme_end - entry->vme_start)); - entry->offset = 0; + VME_OBJECT_SET(entry, + vm_object_allocate( + (vm_map_size_t)(entry->vme_end - + entry->vme_start))); + VME_OFFSET_SET(entry, 0); + assert(entry->use_pmap); vm_map_lock_write_to_read(map); } @@ -8908,33 +13266,59 @@ submap_recurse: * return the protection. */ - *offset = (vaddr - entry->vme_start) + entry->offset; - *object = entry->object.vm_object; + *offset = (vaddr - entry->vme_start) + VME_OFFSET(entry); + *object = VME_OBJECT(entry); *out_prot = prot; + KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_MAP_LOOKUP_OBJECT), VM_KERNEL_UNSLIDE_OR_PERM(*object), 0, 0, 0, 0); if (fault_info) { fault_info->interruptible = THREAD_UNINT; /* for now... */ /* ... the caller will change "interruptible" if needed */ - fault_info->cluster_size = 0; - fault_info->user_tag = entry->alias; - fault_info->behavior = entry->behavior; - fault_info->lo_offset = entry->offset; - fault_info->hi_offset = (entry->vme_end - entry->vme_start) + entry->offset; + fault_info->cluster_size = 0; + fault_info->user_tag = VME_ALIAS(entry); + fault_info->pmap_options = 0; + if (entry->iokit_acct || + (!entry->is_sub_map && !entry->use_pmap)) { + fault_info->pmap_options |= PMAP_OPTIONS_ALT_ACCT; + } + fault_info->behavior = entry->behavior; + fault_info->lo_offset = VME_OFFSET(entry); + fault_info->hi_offset = + (entry->vme_end - entry->vme_start) + VME_OFFSET(entry); fault_info->no_cache = entry->no_cache; fault_info->stealth = FALSE; fault_info->io_sync = FALSE; - fault_info->cs_bypass = (entry->used_for_jit)? TRUE : FALSE; + if (entry->used_for_jit || + entry->vme_resilient_codesign) { + fault_info->cs_bypass = TRUE; + } else { + fault_info->cs_bypass = FALSE; + } + fault_info->pmap_cs_associated = FALSE; +#if CONFIG_PMAP_CS + if (entry->pmap_cs_associated) { + /* + * The pmap layer will validate this page + * before allowing it to be executed from. + */ + fault_info->pmap_cs_associated = TRUE; + } +#endif /* CONFIG_PMAP_CS */ fault_info->mark_zf_absent = FALSE; + fault_info->batch_pmap_op = FALSE; + fault_info->resilient_media = entry->vme_resilient_media; + fault_info->no_copy_on_read = entry->vme_no_copy_on_read; } /* * Lock the object to prevent it from disappearing */ - if (object_lock_type == OBJECT_LOCK_EXCLUSIVE) - vm_object_lock(*object); - else - vm_object_lock_shared(*object); - + if (object_lock_type == OBJECT_LOCK_EXCLUSIVE) { + vm_object_lock(*object); + } else { + vm_object_lock_shared(*object); + } + /* * Save the version number */ @@ -8949,35 +13333,23 @@ submap_recurse: * vm_map_verify: * * Verifies that the map in question has not changed - * since the given version. If successful, the map - * will not change until vm_map_verify_done() is called. + * since the given version. The map has to be locked + * ("shared" mode is fine) before calling this function + * and it will be returned locked too. 
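/*
 * Illustrative aside (not part of the diff): the shape of a caller under the
 * new vm_map_verify() contract shown below.  The caller now takes the map
 * lock ("shared" mode is fine) before calling and keeps it afterwards;
 * vm_map_verify() only compares the saved timestamp and no longer drops the
 * lock on mismatch.  This is a sketch of intended usage, not compilable in
 * isolation; the "retry" label is hypothetical.
 */
	vm_map_version_t version;

	vm_map_lock_read(map);
	version.main_timestamp = map->timestamp;  /* remember the version */
	vm_map_unlock_read(map);

	/* ... do work during which the map may change ... */

	vm_map_lock_read(map);
	if (!vm_map_verify(map, &version)) {
		/* map changed: caller still holds the lock and must retry */
		vm_map_unlock_read(map);
		goto retry;
	}
	/* map unchanged: proceed, then unlock when done */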
*/ boolean_t vm_map_verify( - register vm_map_t map, - register vm_map_version_t *version) /* REF */ + vm_map_t map, + vm_map_version_t *version) /* REF */ { - boolean_t result; + boolean_t result; - vm_map_lock_read(map); + vm_map_lock_assert_held(map); result = (map->timestamp == version->main_timestamp); - if (!result) - vm_map_unlock_read(map); - - return(result); + return result; } -/* - * vm_map_verify_done: - * - * Releases locks acquired by a vm_map_verify. - * - * This is now a macro in vm/vm_map.h. It does a - * vm_map_unlock_read on the map. - */ - - /* * TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY * Goes away after regular vm_region_recurse function migrates to @@ -8989,42 +13361,43 @@ vm_map_verify( kern_return_t vm_map_region_recurse_64( - vm_map_t map, - vm_map_offset_t *address, /* IN/OUT */ - vm_map_size_t *size, /* OUT */ - natural_t *nesting_depth, /* IN/OUT */ - vm_region_submap_info_64_t submap_info, /* IN/OUT */ - mach_msg_type_number_t *count) /* IN/OUT */ + vm_map_t map, + vm_map_offset_t *address, /* IN/OUT */ + vm_map_size_t *size, /* OUT */ + natural_t *nesting_depth, /* IN/OUT */ + vm_region_submap_info_64_t submap_info, /* IN/OUT */ + mach_msg_type_number_t *count) /* IN/OUT */ { - vm_region_extended_info_data_t extended; - vm_map_entry_t tmp_entry; - vm_map_offset_t user_address; - unsigned int user_max_depth; + mach_msg_type_number_t original_count; + vm_region_extended_info_data_t extended; + vm_map_entry_t tmp_entry; + vm_map_offset_t user_address; + unsigned int user_max_depth; /* * "curr_entry" is the VM map entry preceding or including the * address we're looking for. * "curr_map" is the map or sub-map containing "curr_entry". - * "curr_address" is the equivalent of the top map's "user_address" + * "curr_address" is the equivalent of the top map's "user_address" * in the current map. * "curr_offset" is the cumulated offset of "curr_map" in the * target task's address space. * "curr_depth" is the depth of "curr_map" in the chain of * sub-maps. - * + * * "curr_max_below" and "curr_max_above" limit the range (around * "curr_address") we should take into account in the current (sub)map. * They limit the range to what's visible through the map entries * we've traversed from the top map to the current map. - + * */ - vm_map_entry_t curr_entry; - vm_map_address_t curr_address; - vm_map_offset_t curr_offset; - vm_map_t curr_map; - unsigned int curr_depth; - vm_map_offset_t curr_max_below, curr_max_above; - vm_map_offset_t curr_skip; + vm_map_entry_t curr_entry; + vm_map_address_t curr_address; + vm_map_offset_t curr_offset; + vm_map_t curr_map; + unsigned int curr_depth; + vm_map_offset_t curr_max_below, curr_max_above; + vm_map_offset_t curr_skip; /* * "next_" is the same as "curr_" but for the VM region immediately @@ -9032,45 +13405,61 @@ vm_map_region_recurse_64( * too because we want to return info about that region if the * address we're looking for is not mapped. 
*/ - vm_map_entry_t next_entry; - vm_map_offset_t next_offset; - vm_map_offset_t next_address; - vm_map_t next_map; - unsigned int next_depth; - vm_map_offset_t next_max_below, next_max_above; - vm_map_offset_t next_skip; - - boolean_t look_for_pages; + vm_map_entry_t next_entry; + vm_map_offset_t next_offset; + vm_map_offset_t next_address; + vm_map_t next_map; + unsigned int next_depth; + vm_map_offset_t next_max_below, next_max_above; + vm_map_offset_t next_skip; + + boolean_t look_for_pages; vm_region_submap_short_info_64_t short_info; + boolean_t do_region_footprint; if (map == VM_MAP_NULL) { /* no address space to work on */ return KERN_INVALID_ARGUMENT; } - if (*count < VM_REGION_SUBMAP_INFO_COUNT_64) { - if (*count < VM_REGION_SUBMAP_SHORT_INFO_COUNT_64) { - /* - * "info" structure is not big enough and - * would overflow - */ - return KERN_INVALID_ARGUMENT; - } else { - look_for_pages = FALSE; - *count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64; - short_info = (vm_region_submap_short_info_64_t) submap_info; - submap_info = NULL; - } + + if (*count < VM_REGION_SUBMAP_SHORT_INFO_COUNT_64) { + /* + * "info" structure is not big enough and + * would overflow + */ + return KERN_INVALID_ARGUMENT; + } + + do_region_footprint = task_self_region_footprint(); + original_count = *count; + + if (original_count < VM_REGION_SUBMAP_INFO_V0_COUNT_64) { + *count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64; + look_for_pages = FALSE; + short_info = (vm_region_submap_short_info_64_t) submap_info; + submap_info = NULL; } else { look_for_pages = TRUE; - *count = VM_REGION_SUBMAP_INFO_COUNT_64; + *count = VM_REGION_SUBMAP_INFO_V0_COUNT_64; short_info = NULL; - } + if (original_count >= VM_REGION_SUBMAP_INFO_V1_COUNT_64) { + *count = VM_REGION_SUBMAP_INFO_V1_COUNT_64; + } + if (original_count >= VM_REGION_SUBMAP_INFO_V2_COUNT_64) { + *count = VM_REGION_SUBMAP_INFO_V2_COUNT_64; + } + } user_address = *address; user_max_depth = *nesting_depth; - + + if (not_in_kdp) { + vm_map_lock_read(map); + } + +recurse_again: curr_entry = NULL; curr_map = map; curr_address = user_address; @@ -9089,14 +13478,10 @@ vm_map_region_recurse_64( next_max_above = (vm_map_offset_t) -1; next_max_below = (vm_map_offset_t) -1; - if (not_in_kdp) { - vm_map_lock_read(curr_map); - } - for (;;) { if (vm_map_lookup_entry(curr_map, - curr_address, - &tmp_entry)) { + curr_address, + &tmp_entry)) { /* tmp_entry contains the address we're looking for */ curr_entry = tmp_entry; } else { @@ -9110,13 +13495,14 @@ vm_map_region_recurse_64( if (curr_entry == vm_map_to_entry(curr_map) || (curr_entry->vme_start >= - curr_address + curr_max_above)) { + curr_address + curr_max_above)) { /* no next entry at this level: stop looking */ if (not_in_kdp) { vm_map_unlock_read(curr_map); } curr_entry = NULL; curr_map = NULL; + curr_skip = 0; curr_offset = 0; curr_depth = 0; curr_max_above = 0; @@ -9127,7 +13513,7 @@ vm_map_region_recurse_64( /* adjust current address and offset */ skip = curr_entry->vme_start - curr_address; curr_address = curr_entry->vme_start; - curr_skip = skip; + curr_skip += skip; curr_offset += skip; curr_max_above -= skip; curr_max_below = 0; @@ -9142,14 +13528,14 @@ vm_map_region_recurse_64( if (tmp_entry == vm_map_to_entry(curr_map)) { /* no next entry at this level */ } else if (tmp_entry->vme_start >= - curr_address + curr_max_above) { + curr_address + curr_max_above) { /* * tmp_entry is beyond the scope of what we mapped of * this submap in the upper level: ignore it. 
*/ } else if ((next_entry == NULL) || - (tmp_entry->vme_start + curr_offset <= - next_entry->vme_start + next_offset)) { + (tmp_entry->vme_start + curr_offset <= + next_entry->vme_start + next_offset)) { /* * We didn't have a "next_entry" or this one is * closer to the address we're looking for: @@ -9166,14 +13552,15 @@ vm_map_region_recurse_64( next_depth = curr_depth; next_address = next_entry->vme_start; next_skip = curr_skip; + next_skip += (next_address - curr_address); next_offset = curr_offset; next_offset += (next_address - curr_address); next_max_above = MIN(next_max_above, curr_max_above); next_max_above = MIN(next_max_above, - next_entry->vme_end - next_address); + next_entry->vme_end - next_address); next_max_below = MIN(next_max_below, curr_max_below); next_max_below = MIN(next_max_below, - next_address - next_entry->vme_start); + next_address - next_entry->vme_start); } /* @@ -9182,12 +13569,12 @@ vm_map_region_recurse_64( * the rest of that submap is irrelevant to us, since it's not * mapped here. * The relevant portion of the map starts at - * "curr_entry->offset" up to the size of "curr_entry". + * "VME_OFFSET(curr_entry)" up to the size of "curr_entry". */ curr_max_above = MIN(curr_max_above, - curr_entry->vme_end - curr_address); + curr_entry->vme_end - curr_address); curr_max_below = MIN(curr_max_below, - curr_address - curr_entry->vme_start); + curr_address - curr_entry->vme_start); if (!curr_entry->is_sub_map || curr_depth >= user_max_depth) { @@ -9209,52 +13596,117 @@ vm_map_region_recurse_64( * later. */ if (not_in_kdp) { - vm_map_lock_read(curr_entry->object.sub_map); + vm_map_lock_read(VME_SUBMAP(curr_entry)); } if (curr_map == next_map) { /* keep "next_map" locked in case we need it */ } else { /* release this map */ - if (not_in_kdp) + if (not_in_kdp) { vm_map_unlock_read(curr_map); + } } /* * Adjust the offset. "curr_entry" maps the submap * at relative address "curr_entry->vme_start" in the - * curr_map but skips the first "curr_entry->offset" + * curr_map but skips the first "VME_OFFSET(curr_entry)" * bytes of the submap. * "curr_offset" always represents the offset of a virtual * address in the curr_map relative to the absolute address * space (i.e. the top-level VM map). */ curr_offset += - (curr_entry->offset - curr_entry->vme_start); + (VME_OFFSET(curr_entry) - curr_entry->vme_start); curr_address = user_address + curr_offset; /* switch to the submap */ - curr_map = curr_entry->object.sub_map; + curr_map = VME_SUBMAP(curr_entry); curr_depth++; curr_entry = NULL; } +// LP64todo: all the current tools are 32bit, obviously never worked for 64b +// so probably should be a real 32b ID vs. ptr. +// Current users just check for equality + if (curr_entry == NULL) { /* no VM region contains the address... */ + + if (do_region_footprint && /* we want footprint numbers */ + next_entry == NULL && /* & there are no more regions */ + /* & we haven't already provided our fake region: */ + user_address <= vm_map_last_entry(map)->vme_end) { + ledger_amount_t ledger_resident, ledger_compressed; + + /* + * Add a fake memory region to account for + * purgeable and/or ledger-tagged memory that + * counts towards this task's memory footprint, + * i.e. the resident/compressed pages of non-volatile + * objects owned by that task. 
+ */ + task_ledgers_footprint(map->pmap->ledger, + &ledger_resident, + &ledger_compressed); + if (ledger_resident + ledger_compressed == 0) { + /* no purgeable memory usage to report */ + return KERN_INVALID_ADDRESS; + } + /* fake region to show nonvolatile footprint */ + if (look_for_pages) { + submap_info->protection = VM_PROT_DEFAULT; + submap_info->max_protection = VM_PROT_DEFAULT; + submap_info->inheritance = VM_INHERIT_DEFAULT; + submap_info->offset = 0; + submap_info->user_tag = -1; + submap_info->pages_resident = (unsigned int) (ledger_resident / PAGE_SIZE); + submap_info->pages_shared_now_private = 0; + submap_info->pages_swapped_out = (unsigned int) (ledger_compressed / PAGE_SIZE); + submap_info->pages_dirtied = submap_info->pages_resident; + submap_info->ref_count = 1; + submap_info->shadow_depth = 0; + submap_info->external_pager = 0; + submap_info->share_mode = SM_PRIVATE; + submap_info->is_submap = 0; + submap_info->behavior = VM_BEHAVIOR_DEFAULT; + submap_info->object_id = INFO_MAKE_FAKE_OBJECT_ID(map, task_ledgers.purgeable_nonvolatile); + submap_info->user_wired_count = 0; + submap_info->pages_reusable = 0; + } else { + short_info->user_tag = -1; + short_info->offset = 0; + short_info->protection = VM_PROT_DEFAULT; + short_info->inheritance = VM_INHERIT_DEFAULT; + short_info->max_protection = VM_PROT_DEFAULT; + short_info->behavior = VM_BEHAVIOR_DEFAULT; + short_info->user_wired_count = 0; + short_info->is_submap = 0; + short_info->object_id = INFO_MAKE_FAKE_OBJECT_ID(map, task_ledgers.purgeable_nonvolatile); + short_info->external_pager = 0; + short_info->shadow_depth = 0; + short_info->share_mode = SM_PRIVATE; + short_info->ref_count = 1; + } + *nesting_depth = 0; + *size = (vm_map_size_t) (ledger_resident + ledger_compressed); +// *address = user_address; + *address = vm_map_last_entry(map)->vme_end; + return KERN_SUCCESS; + } + if (next_entry == NULL) { /* ... and no VM region follows it either */ return KERN_INVALID_ADDRESS; } /* ... gather info about the next VM region */ curr_entry = next_entry; - curr_map = next_map; /* still locked ... */ + curr_map = next_map; /* still locked ... */ curr_address = next_address; curr_skip = next_skip; curr_offset = next_offset; curr_depth = next_depth; curr_max_above = next_max_above; curr_max_below = next_max_below; - if (curr_map == map) { - user_address = curr_address; - } } else { /* we won't need "next_entry" after all */ if (next_entry != NULL) { @@ -9272,6 +13724,18 @@ vm_map_region_recurse_64( next_max_below = -1; next_max_above = -1; + if (curr_entry->is_sub_map && + curr_depth < user_max_depth) { + /* + * We're not as deep as we could be: we must have + * gone back up after not finding anything mapped + * below the original top-level map entry's. + * Let's move "curr_address" forward and recurse again. + */ + user_address = curr_address; + goto recurse_again; + } + *nesting_depth = curr_depth; *size = curr_max_above + curr_max_below; *address = user_address + curr_skip - curr_max_below; @@ -9279,53 +13743,56 @@ vm_map_region_recurse_64( // LP64todo: all the current tools are 32bit, obviously never worked for 64b // so probably should be a real 32b ID vs. ptr. 
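/*
 * Illustrative aside (not part of the diff): what the fake footprint region
 * added above looks like from user space.  A task that has opted into
 * footprint reporting and walks its own address space with
 * mach_vm_region_recurse() sees one extra SM_PRIVATE region past the last
 * real entry, sized to its non-volatile resident + compressed pages.
 * Standalone user-space code that simply dumps every region.
 */
#include <stdio.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

int
main(void)
{
	mach_vm_address_t addr = 0;
	mach_vm_size_t size = 0;
	natural_t depth = 0;
	vm_region_submap_info_data_64_t info;
	mach_msg_type_number_t count;
	kern_return_t kr;

	for (;;) {
		count = VM_REGION_SUBMAP_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(), &addr, &size,
		    &depth, (vm_region_recurse_info_t)&info, &count);
		if (kr != KERN_SUCCESS) {
			break;      /* KERN_INVALID_ADDRESS: no more regions */
		}
		printf("0x%llx-0x%llx resident=%u share_mode=%d\n",
		    (unsigned long long)addr,
		    (unsigned long long)(addr + size),
		    info.pages_resident, info.share_mode);
		addr += size;       /* move past this region */
	}
	return 0;
}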
// Current users just check for equality -#define INFO_MAKE_OBJECT_ID(p) ((uint32_t)(uintptr_t)p) +#define INFO_MAKE_OBJECT_ID(p) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM(p)) if (look_for_pages) { - submap_info->user_tag = curr_entry->alias; - submap_info->offset = curr_entry->offset; + submap_info->user_tag = VME_ALIAS(curr_entry); + submap_info->offset = VME_OFFSET(curr_entry); submap_info->protection = curr_entry->protection; submap_info->inheritance = curr_entry->inheritance; submap_info->max_protection = curr_entry->max_protection; submap_info->behavior = curr_entry->behavior; submap_info->user_wired_count = curr_entry->user_wired_count; submap_info->is_submap = curr_entry->is_sub_map; - submap_info->object_id = INFO_MAKE_OBJECT_ID(curr_entry->object.vm_object); + submap_info->object_id = INFO_MAKE_OBJECT_ID(VME_OBJECT(curr_entry)); } else { - short_info->user_tag = curr_entry->alias; - short_info->offset = curr_entry->offset; + short_info->user_tag = VME_ALIAS(curr_entry); + short_info->offset = VME_OFFSET(curr_entry); short_info->protection = curr_entry->protection; short_info->inheritance = curr_entry->inheritance; short_info->max_protection = curr_entry->max_protection; short_info->behavior = curr_entry->behavior; short_info->user_wired_count = curr_entry->user_wired_count; short_info->is_submap = curr_entry->is_sub_map; - short_info->object_id = INFO_MAKE_OBJECT_ID(curr_entry->object.vm_object); + short_info->object_id = INFO_MAKE_OBJECT_ID(VME_OBJECT(curr_entry)); } extended.pages_resident = 0; extended.pages_swapped_out = 0; extended.pages_shared_now_private = 0; extended.pages_dirtied = 0; + extended.pages_reusable = 0; extended.external_pager = 0; extended.shadow_depth = 0; + extended.share_mode = SM_EMPTY; + extended.ref_count = 0; if (not_in_kdp) { if (!curr_entry->is_sub_map) { vm_map_offset_t range_start, range_end; range_start = MAX((curr_address - curr_max_below), - curr_entry->vme_start); + curr_entry->vme_start); range_end = MIN((curr_address + curr_max_above), - curr_entry->vme_end); + curr_entry->vme_end); vm_map_region_walk(curr_map, - range_start, - curr_entry, - (curr_entry->offset + - (range_start - - curr_entry->vme_start)), - range_end - range_start, - &extended, - look_for_pages); + range_start, + curr_entry, + (VME_OFFSET(curr_entry) + + (range_start - + curr_entry->vme_start)), + range_end - range_start, + &extended, + look_for_pages, VM_REGION_EXTENDED_INFO_COUNT); if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED) { @@ -9337,8 +13804,7 @@ vm_map_region_recurse_64( } else { extended.share_mode = SM_PRIVATE; } - extended.ref_count = - curr_entry->object.sub_map->ref_count; + extended.ref_count = os_ref_get_count(&VME_SUBMAP(curr_entry)->map_refcnt); } } @@ -9346,12 +13812,19 @@ vm_map_region_recurse_64( submap_info->pages_resident = extended.pages_resident; submap_info->pages_swapped_out = extended.pages_swapped_out; submap_info->pages_shared_now_private = - extended.pages_shared_now_private; + extended.pages_shared_now_private; submap_info->pages_dirtied = extended.pages_dirtied; submap_info->external_pager = extended.external_pager; submap_info->shadow_depth = extended.shadow_depth; submap_info->share_mode = extended.share_mode; submap_info->ref_count = extended.ref_count; + + if (original_count >= VM_REGION_SUBMAP_INFO_V1_COUNT_64) { + submap_info->pages_reusable = extended.pages_reusable; + } + if (original_count >= VM_REGION_SUBMAP_INFO_V2_COUNT_64) { + submap_info->object_id_full = (vm_object_id_t) 
(VME_OBJECT(curr_entry) != NULL) ? VM_KERNEL_ADDRPERM(VME_OBJECT(curr_entry)) : 0ULL; + } } else { short_info->external_pager = extended.external_pager; short_info->shadow_depth = extended.shadow_depth; @@ -9380,30 +13853,31 @@ vm_map_region_recurse_64( kern_return_t vm_map_region( - vm_map_t map, - vm_map_offset_t *address, /* IN/OUT */ - vm_map_size_t *size, /* OUT */ - vm_region_flavor_t flavor, /* IN */ - vm_region_info_t info, /* OUT */ - mach_msg_type_number_t *count, /* IN/OUT */ - mach_port_t *object_name) /* OUT */ + vm_map_t map, + vm_map_offset_t *address, /* IN/OUT */ + vm_map_size_t *size, /* OUT */ + vm_region_flavor_t flavor, /* IN */ + vm_region_info_t info, /* OUT */ + mach_msg_type_number_t *count, /* IN/OUT */ + mach_port_t *object_name) /* OUT */ { - vm_map_entry_t tmp_entry; - vm_map_entry_t entry; - vm_map_offset_t start; + vm_map_entry_t tmp_entry; + vm_map_entry_t entry; + vm_map_offset_t start; - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } switch (flavor) { - case VM_REGION_BASIC_INFO: /* legacy for old 32-bit objects info */ { - vm_region_basic_info_t basic; + vm_region_basic_info_t basic; - if (*count < VM_REGION_BASIC_INFO_COUNT) - return(KERN_INVALID_ARGUMENT); + if (*count < VM_REGION_BASIC_INFO_COUNT) { + return KERN_INVALID_ARGUMENT; + } basic = (vm_region_basic_info_t) info; *count = VM_REGION_BASIC_INFO_COUNT; @@ -9414,7 +13888,7 @@ vm_map_region( if (!vm_map_lookup_entry(map, start, &tmp_entry)) { if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { vm_map_unlock_read(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } } else { entry = tmp_entry; @@ -9422,7 +13896,7 @@ vm_map_region( start = entry->vme_start; - basic->offset = (uint32_t)entry->offset; + basic->offset = (uint32_t)VME_OFFSET(entry); basic->protection = entry->protection; basic->inheritance = entry->inheritance; basic->max_protection = entry->max_protection; @@ -9432,7 +13906,9 @@ vm_map_region( *address = start; *size = (entry->vme_end - start); - if (object_name) *object_name = IP_NULL; + if (object_name) { + *object_name = IP_NULL; + } if (entry->is_sub_map) { basic->shared = FALSE; } else { @@ -9440,15 +13916,16 @@ vm_map_region( } vm_map_unlock_read(map); - return(KERN_SUCCESS); + return KERN_SUCCESS; } case VM_REGION_BASIC_INFO_64: { - vm_region_basic_info_64_t basic; + vm_region_basic_info_64_t basic; - if (*count < VM_REGION_BASIC_INFO_COUNT_64) - return(KERN_INVALID_ARGUMENT); + if (*count < VM_REGION_BASIC_INFO_COUNT_64) { + return KERN_INVALID_ARGUMENT; + } basic = (vm_region_basic_info_64_t) info; *count = VM_REGION_BASIC_INFO_COUNT_64; @@ -9459,7 +13936,7 @@ vm_map_region( if (!vm_map_lookup_entry(map, start, &tmp_entry)) { if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { vm_map_unlock_read(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } } else { entry = tmp_entry; @@ -9467,7 +13944,7 @@ vm_map_region( start = entry->vme_start; - basic->offset = entry->offset; + basic->offset = VME_OFFSET(entry); basic->protection = entry->protection; basic->inheritance = entry->inheritance; basic->max_protection = entry->max_protection; @@ -9477,7 +13954,9 @@ vm_map_region( *address = start; *size = (entry->vme_end - start); - if (object_name) *object_name = IP_NULL; + if (object_name) { + *object_name = IP_NULL; + } if (entry->is_sub_map) { basic->shared = FALSE; } else { @@ -9485,59 +13964,76 @@ vm_map_region( } vm_map_unlock_read(map); - 
return(KERN_SUCCESS); + return KERN_SUCCESS; } case VM_REGION_EXTENDED_INFO: - { - vm_region_extended_info_t extended; + if (*count < VM_REGION_EXTENDED_INFO_COUNT) { + return KERN_INVALID_ARGUMENT; + } + /*fallthru*/ + case VM_REGION_EXTENDED_INFO__legacy: + if (*count < VM_REGION_EXTENDED_INFO_COUNT__legacy) { + return KERN_INVALID_ARGUMENT; + } - if (*count < VM_REGION_EXTENDED_INFO_COUNT) - return(KERN_INVALID_ARGUMENT); + { + vm_region_extended_info_t extended; + mach_msg_type_number_t original_count; - extended = (vm_region_extended_info_t) info; - *count = VM_REGION_EXTENDED_INFO_COUNT; + extended = (vm_region_extended_info_t) info; - vm_map_lock_read(map); + vm_map_lock_read(map); - start = *address; - if (!vm_map_lookup_entry(map, start, &tmp_entry)) { - if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { - vm_map_unlock_read(map); - return(KERN_INVALID_ADDRESS); + start = *address; + if (!vm_map_lookup_entry(map, start, &tmp_entry)) { + if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { + vm_map_unlock_read(map); + return KERN_INVALID_ADDRESS; + } + } else { + entry = tmp_entry; } - } else { - entry = tmp_entry; - } - start = entry->vme_start; + start = entry->vme_start; - extended->protection = entry->protection; - extended->user_tag = entry->alias; - extended->pages_resident = 0; - extended->pages_swapped_out = 0; - extended->pages_shared_now_private = 0; - extended->pages_dirtied = 0; - extended->external_pager = 0; - extended->shadow_depth = 0; + extended->protection = entry->protection; + extended->user_tag = VME_ALIAS(entry); + extended->pages_resident = 0; + extended->pages_swapped_out = 0; + extended->pages_shared_now_private = 0; + extended->pages_dirtied = 0; + extended->external_pager = 0; + extended->shadow_depth = 0; + + original_count = *count; + if (flavor == VM_REGION_EXTENDED_INFO__legacy) { + *count = VM_REGION_EXTENDED_INFO_COUNT__legacy; + } else { + extended->pages_reusable = 0; + *count = VM_REGION_EXTENDED_INFO_COUNT; + } - vm_map_region_walk(map, start, entry, entry->offset, entry->vme_end - start, extended, TRUE); + vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, extended, TRUE, *count); - if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED) - extended->share_mode = SM_PRIVATE; + if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED) { + extended->share_mode = SM_PRIVATE; + } - if (object_name) - *object_name = IP_NULL; - *address = start; - *size = (entry->vme_end - start); + if (object_name) { + *object_name = IP_NULL; + } + *address = start; + *size = (entry->vme_end - start); - vm_map_unlock_read(map); - return(KERN_SUCCESS); - } + vm_map_unlock_read(map); + return KERN_SUCCESS; + } case VM_REGION_TOP_INFO: - { - vm_region_top_info_t top; + { + vm_region_top_info_t top; - if (*count < VM_REGION_TOP_INFO_COUNT) - return(KERN_INVALID_ARGUMENT); + if (*count < VM_REGION_TOP_INFO_COUNT) { + return KERN_INVALID_ARGUMENT; + } top = (vm_region_top_info_t) info; *count = VM_REGION_TOP_INFO_COUNT; @@ -9548,11 +14044,10 @@ vm_map_region( if (!vm_map_lookup_entry(map, start, &tmp_entry)) { if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { vm_map_unlock_read(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } } else { entry = tmp_entry; - } start = entry->vme_start; @@ -9561,32 +14056,32 @@ vm_map_region( vm_map_region_top_walk(entry, top); - if (object_name) + if (object_name) { *object_name = 
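/*
 * Illustrative aside (not part of the diff): querying one of the flavors
 * handled above from user space.  mach_vm_region() with
 * VM_REGION_BASIC_INFO_64 fills a vm_region_basic_info_data_64_t for the
 * region at or after the given address; the kernel clamps *count to the
 * flavor's count, as in the switch above.  Standalone user-space code.
 */
#include <stdio.h>
#include <stdint.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

int
main(void)
{
	mach_vm_address_t addr = (mach_vm_address_t)(uintptr_t)&addr; /* stack */
	mach_vm_size_t size = 0;
	vm_region_basic_info_data_64_t basic;
	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
	mach_port_t object_name = MACH_PORT_NULL;
	kern_return_t kr;

	kr = mach_vm_region(mach_task_self(), &addr, &size,
	    VM_REGION_BASIC_INFO_64, (vm_region_info_t)&basic,
	    &count, &object_name);
	if (kr == KERN_SUCCESS) {
		printf("region 0x%llx-0x%llx prot=0x%x shared=%d\n",
		    (unsigned long long)addr,
		    (unsigned long long)(addr + size),
		    basic.protection, basic.shared);
	}
	return 0;
}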
IP_NULL; + } *address = start; *size = (entry->vme_end - start); vm_map_unlock_read(map); - return(KERN_SUCCESS); + return KERN_SUCCESS; } default: - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } } -#define OBJ_RESIDENT_COUNT(obj, entry_size) \ - MIN((entry_size), \ - ((obj)->all_reusable ? \ - (obj)->wired_page_count : \ +#define OBJ_RESIDENT_COUNT(obj, entry_size) \ + MIN((entry_size), \ + ((obj)->all_reusable ? \ + (obj)->wired_page_count : \ (obj)->resident_page_count - (obj)->reusable_page_count)) void vm_map_region_top_walk( - vm_map_entry_t entry, + vm_map_entry_t entry, vm_region_top_info_t top) { - - if (entry->object.vm_object == 0 || entry->is_sub_map) { + if (VME_OBJECT(entry) == 0 || entry->is_sub_map) { top->share_mode = SM_EMPTY; top->ref_count = 0; top->obj_id = 0; @@ -9594,41 +14089,44 @@ vm_map_region_top_walk( } { - struct vm_object *obj, *tmp_obj; - int ref_count; - uint32_t entry_size; + struct vm_object *obj, *tmp_obj; + int ref_count; + uint32_t entry_size; entry_size = (uint32_t) ((entry->vme_end - entry->vme_start) / PAGE_SIZE_64); - obj = entry->object.vm_object; + obj = VME_OBJECT(entry); vm_object_lock(obj); - if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) + if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) { ref_count--; + } assert(obj->reusable_page_count <= obj->resident_page_count); if (obj->shadow) { - if (ref_count == 1) + if (ref_count == 1) { top->private_pages_resident = - OBJ_RESIDENT_COUNT(obj, entry_size); - else + OBJ_RESIDENT_COUNT(obj, entry_size); + } else { top->shared_pages_resident = - OBJ_RESIDENT_COUNT(obj, entry_size); + OBJ_RESIDENT_COUNT(obj, entry_size); + } top->ref_count = ref_count; top->share_mode = SM_COW; - + while ((tmp_obj = obj->shadow)) { vm_object_lock(tmp_obj); vm_object_unlock(obj); obj = tmp_obj; - if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) + if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) { ref_count--; + } assert(obj->reusable_page_count <= obj->resident_page_count); top->shared_pages_resident += - OBJ_RESIDENT_COUNT(obj, entry_size); + OBJ_RESIDENT_COUNT(obj, entry_size); top->ref_count += ref_count - 1; } } else { @@ -9639,25 +14137,25 @@ vm_map_region_top_walk( } else if (entry->needs_copy) { top->share_mode = SM_COW; top->shared_pages_resident = - OBJ_RESIDENT_COUNT(obj, entry_size); + OBJ_RESIDENT_COUNT(obj, entry_size); } else { if (ref_count == 1 || - (ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) { + (ref_count == 2 && obj->named)) { top->share_mode = SM_PRIVATE; top->private_pages_resident = - OBJ_RESIDENT_COUNT(obj, - entry_size); + OBJ_RESIDENT_COUNT(obj, + entry_size); } else { top->share_mode = SM_SHARED; top->shared_pages_resident = - OBJ_RESIDENT_COUNT(obj, - entry_size); + OBJ_RESIDENT_COUNT(obj, + entry_size); } } top->ref_count = ref_count; } /* XXX K64: obj_id will be truncated */ - top->obj_id = (unsigned int) (uintptr_t)obj; + top->obj_id = (unsigned int) (uintptr_t)VM_KERNEL_ADDRPERM(obj); vm_object_unlock(obj); } @@ -9665,25 +14163,29 @@ vm_map_region_top_walk( void vm_map_region_walk( - vm_map_t map, - vm_map_offset_t va, - vm_map_entry_t entry, - vm_object_offset_t offset, - vm_object_size_t range, - vm_region_extended_info_t extended, - boolean_t look_for_pages) -{ - register struct vm_object *obj, *tmp_obj; - register vm_map_offset_t last_offset; - register int i; - register int ref_count; - struct vm_object *shadow_object; - int shadow_depth; - - if ((entry->object.vm_object == 0) || + 
vm_map_t map, + vm_map_offset_t va, + vm_map_entry_t entry, + vm_object_offset_t offset, + vm_object_size_t range, + vm_region_extended_info_t extended, + boolean_t look_for_pages, + mach_msg_type_number_t count) +{ + struct vm_object *obj, *tmp_obj; + vm_map_offset_t last_offset; + int i; + int ref_count; + struct vm_object *shadow_object; + int shadow_depth; + boolean_t do_region_footprint; + + do_region_footprint = task_self_region_footprint(); + + if ((VME_OBJECT(entry) == 0) || (entry->is_sub_map) || - (entry->object.vm_object->phys_contiguous && - !entry->superpage_size)) { + (VME_OBJECT(entry)->phys_contiguous && + !entry->superpage_size)) { extended->share_mode = SM_EMPTY; extended->ref_count = 0; return; @@ -9699,96 +14201,218 @@ vm_map_region_walk( return; } - { - obj = entry->object.vm_object; - - vm_object_lock(obj); - - if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) - ref_count--; + obj = VME_OBJECT(entry); - if (look_for_pages) { - for (last_offset = offset + range; - offset < last_offset; - offset += PAGE_SIZE_64, va += PAGE_SIZE) - vm_map_region_look_for_page(map, va, obj, - offset, ref_count, - 0, extended); - } else { - shadow_object = obj->shadow; - shadow_depth = 0; + vm_object_lock(obj); - if ( !(obj->pager_trusted) && !(obj->internal)) - extended->external_pager = 1; + if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) { + ref_count--; + } - if (shadow_object != VM_OBJECT_NULL) { - vm_object_lock(shadow_object); - for (; - shadow_object != VM_OBJECT_NULL; - shadow_depth++) { - vm_object_t next_shadow; - - if ( !(shadow_object->pager_trusted) && - !(shadow_object->internal)) - extended->external_pager = 1; - - next_shadow = shadow_object->shadow; - if (next_shadow) { - vm_object_lock(next_shadow); + if (look_for_pages) { + for (last_offset = offset + range; + offset < last_offset; + offset += PAGE_SIZE_64, va += PAGE_SIZE) { + if (do_region_footprint) { + int disp; + + disp = 0; + if (map->has_corpse_footprint) { + /* + * Query the page info data we saved + * while forking the corpse. + */ + vm_map_corpse_footprint_query_page_info( + map, + va, + &disp); + } else { + /* + * Query the pmap. + */ + pmap_query_page_info(map->pmap, + va, + &disp); + } + if (disp & PMAP_QUERY_PAGE_PRESENT) { + if (!(disp & PMAP_QUERY_PAGE_ALTACCT)) { + extended->pages_resident++; + } + if (disp & PMAP_QUERY_PAGE_REUSABLE) { + extended->pages_reusable++; + } else if (!(disp & PMAP_QUERY_PAGE_INTERNAL) || + (disp & PMAP_QUERY_PAGE_ALTACCT)) { + /* alternate accounting */ + } else { + extended->pages_dirtied++; + } + } else if (disp & PMAP_QUERY_PAGE_COMPRESSED) { + if (disp & PMAP_QUERY_PAGE_COMPRESSED_ALTACCT) { + /* alternate accounting */ + } else { + extended->pages_swapped_out++; + } + } + /* deal with alternate accounting */ + if (obj->purgable == VM_PURGABLE_NONVOLATILE && + /* && not tagged as no-footprint? */ + VM_OBJECT_OWNER(obj) != NULL && + VM_OBJECT_OWNER(obj)->map == map) { + if ((((va + - entry->vme_start + + VME_OFFSET(entry)) + / PAGE_SIZE) < + (obj->resident_page_count + + vm_compressor_pager_get_count(obj->pager)))) { + /* + * Non-volatile purgeable object owned + * by this task: report the first + * "#resident + #compressed" pages as + * "resident" (to show that they + * contribute to the footprint) but not + * "dirty" (to avoid double-counting + * with the fake "non-volatile" region + * we'll report at the end of the + * address space to account for all + * (mapped or not) non-volatile memory + * owned by this task. 
+ */ + extended->pages_resident++; + } + } else if ((obj->purgable == VM_PURGABLE_VOLATILE || + obj->purgable == VM_PURGABLE_EMPTY) && + /* && not tagged as no-footprint? */ + VM_OBJECT_OWNER(obj) != NULL && + VM_OBJECT_OWNER(obj)->map == map) { + if ((((va + - entry->vme_start + + VME_OFFSET(entry)) + / PAGE_SIZE) < + obj->wired_page_count)) { + /* + * Volatile|empty purgeable object owned + * by this task: report the first + * "#wired" pages as "resident" (to + * show that they contribute to the + * footprint) but not "dirty" (to avoid + * double-counting with the fake + * "non-volatile" region we'll report + * at the end of the address space to + * account for all (mapped or not) + * non-volatile memory owned by this + * task. + */ + extended->pages_resident++; } - vm_object_unlock(shadow_object); - shadow_object = next_shadow; + } else if (obj->purgable != VM_PURGABLE_DENY) { + /* + * Pages from purgeable objects + * will be reported as dirty + * appropriately in an extra + * fake memory region at the end of + * the address space. + */ + } else if (entry->iokit_acct) { + /* + * IOKit mappings are considered + * as fully dirty for footprint's + * sake. + */ + extended->pages_dirtied++; } + continue; } - extended->shadow_depth = shadow_depth; + + vm_map_region_look_for_page(map, va, obj, + offset, ref_count, + 0, extended, count); } - if (extended->shadow_depth || entry->needs_copy) - extended->share_mode = SM_COW; - else { - if (ref_count == 1) - extended->share_mode = SM_PRIVATE; - else { - if (obj->true_share) - extended->share_mode = SM_TRUESHARED; - else - extended->share_mode = SM_SHARED; + if (do_region_footprint) { + goto collect_object_info; + } + } else { +collect_object_info: + shadow_object = obj->shadow; + shadow_depth = 0; + + if (!(obj->internal)) { + extended->external_pager = 1; + } + + if (shadow_object != VM_OBJECT_NULL) { + vm_object_lock(shadow_object); + for (; + shadow_object != VM_OBJECT_NULL; + shadow_depth++) { + vm_object_t next_shadow; + + if (!(shadow_object->internal)) { + extended->external_pager = 1; + } + + next_shadow = shadow_object->shadow; + if (next_shadow) { + vm_object_lock(next_shadow); + } + vm_object_unlock(shadow_object); + shadow_object = next_shadow; } } - extended->ref_count = ref_count - extended->shadow_depth; - - for (i = 0; i < extended->shadow_depth; i++) { - if ((tmp_obj = obj->shadow) == 0) - break; - vm_object_lock(tmp_obj); - vm_object_unlock(obj); + extended->shadow_depth = shadow_depth; + } - if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress) - ref_count--; + if (extended->shadow_depth || entry->needs_copy) { + extended->share_mode = SM_COW; + } else { + if (ref_count == 1) { + extended->share_mode = SM_PRIVATE; + } else { + if (obj->true_share) { + extended->share_mode = SM_TRUESHARED; + } else { + extended->share_mode = SM_SHARED; + } + } + } + extended->ref_count = ref_count - extended->shadow_depth; - extended->ref_count += ref_count; - obj = tmp_obj; + for (i = 0; i < extended->shadow_depth; i++) { + if ((tmp_obj = obj->shadow) == 0) { + break; } + vm_object_lock(tmp_obj); vm_object_unlock(obj); - if (extended->share_mode == SM_SHARED) { - register vm_map_entry_t cur; - register vm_map_entry_t last; - int my_refs; + if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress) { + ref_count--; + } + + extended->ref_count += ref_count; + obj = tmp_obj; + } + vm_object_unlock(obj); + + if (extended->share_mode == SM_SHARED) { + vm_map_entry_t cur; + vm_map_entry_t last; + int my_refs; - obj = 
entry->object.vm_object; - last = vm_map_to_entry(map); - my_refs = 0; + obj = VME_OBJECT(entry); + last = vm_map_to_entry(map); + my_refs = 0; - if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) - ref_count--; - for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next) - my_refs += vm_map_region_count_obj_refs(cur, obj); + if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) { + ref_count--; + } + for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next) { + my_refs += vm_map_region_count_obj_refs(cur, obj); + } - if (my_refs == ref_count) - extended->share_mode = SM_PRIVATE_ALIASED; - else if (my_refs > 1) - extended->share_mode = SM_SHARED_ALIASED; + if (my_refs == ref_count) { + extended->share_mode = SM_PRIVATE_ALIASED; + } else if (my_refs > 1) { + extended->share_mode = SM_SHARED_ALIASED; } } } @@ -9799,145 +14423,132 @@ vm_map_region_walk( static void vm_map_region_look_for_page( - __unused vm_map_t map, - __unused vm_map_offset_t va, - vm_object_t object, - vm_object_offset_t offset, - int max_refcnt, - int depth, - vm_region_extended_info_t extended) -{ - register vm_page_t p; - register vm_object_t shadow; - register int ref_count; - vm_object_t caller_object; -#if MACH_PAGEMAP - kern_return_t kr; -#endif + __unused vm_map_t map, + __unused vm_map_offset_t va, + vm_object_t object, + vm_object_offset_t offset, + int max_refcnt, + int depth, + vm_region_extended_info_t extended, + mach_msg_type_number_t count) +{ + vm_page_t p; + vm_object_t shadow; + int ref_count; + vm_object_t caller_object; + shadow = object->shadow; caller_object = object; - - while (TRUE) { - if ( !(object->pager_trusted) && !(object->internal)) + while (TRUE) { + if (!(object->internal)) { extended->external_pager = 1; + } if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { - if (shadow && (max_refcnt == 1)) - extended->pages_shared_now_private++; + if (shadow && (max_refcnt == 1)) { + extended->pages_shared_now_private++; + } - if (!p->fictitious && - (p->dirty || pmap_is_modified(p->phys_page))) - extended->pages_dirtied++; + if (!p->vmp_fictitious && + (p->vmp_dirty || pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) { + extended->pages_dirtied++; + } else if (count >= VM_REGION_EXTENDED_INFO_COUNT) { + if (p->vmp_reusable || object->all_reusable) { + extended->pages_reusable++; + } + } - extended->pages_resident++; + extended->pages_resident++; - if(object != caller_object) + if (object != caller_object) { vm_object_unlock(object); + } return; } -#if MACH_PAGEMAP - if (object->existence_map) { - if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_EXISTS) { - - extended->pages_swapped_out++; - - if(object != caller_object) - vm_object_unlock(object); - - return; - } - } else if (object->internal && - object->alive && - !object->terminating && - object->pager_ready) { - - memory_object_t pager; - - vm_object_paging_begin(object); - pager = object->pager; - vm_object_unlock(object); - - kr = memory_object_data_request( - pager, - offset + object->paging_offset, - 0, /* just poke the pager */ - VM_PROT_READ, - NULL); - - vm_object_lock(object); - vm_object_paging_end(object); - - if (kr == KERN_SUCCESS) { + if (object->internal && + object->alive && + !object->terminating && + object->pager_ready) { + if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) + == VM_EXTERNAL_STATE_EXISTS) { /* the pager has that page */ extended->pages_swapped_out++; - if (object != caller_object) + if (object != caller_object) { 
vm_object_unlock(object); + } return; } } -#endif /* MACH_PAGEMAP */ if (shadow) { vm_object_lock(shadow); - if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress) - ref_count--; + if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress) { + ref_count--; + } + + if (++depth > extended->shadow_depth) { + extended->shadow_depth = depth; + } - if (++depth > extended->shadow_depth) - extended->shadow_depth = depth; + if (ref_count > max_refcnt) { + max_refcnt = ref_count; + } - if (ref_count > max_refcnt) - max_refcnt = ref_count; - - if(object != caller_object) + if (object != caller_object) { vm_object_unlock(object); + } offset = offset + object->vo_shadow_offset; object = shadow; shadow = object->shadow; continue; } - if(object != caller_object) + if (object != caller_object) { vm_object_unlock(object); + } break; } } static int vm_map_region_count_obj_refs( - vm_map_entry_t entry, + vm_map_entry_t entry, vm_object_t object) { - register int ref_count; - register vm_object_t chk_obj; - register vm_object_t tmp_obj; + int ref_count; + vm_object_t chk_obj; + vm_object_t tmp_obj; - if (entry->object.vm_object == 0) - return(0); + if (VME_OBJECT(entry) == 0) { + return 0; + } - if (entry->is_sub_map) - return(0); - else { + if (entry->is_sub_map) { + return 0; + } else { ref_count = 0; - chk_obj = entry->object.vm_object; + chk_obj = VME_OBJECT(entry); vm_object_lock(chk_obj); while (chk_obj) { - if (chk_obj == object) + if (chk_obj == object) { ref_count++; + } tmp_obj = chk_obj->shadow; - if (tmp_obj) + if (tmp_obj) { vm_object_lock(tmp_obj); + } vm_object_unlock(chk_obj); chk_obj = tmp_obj; } } - return(ref_count); + return ref_count; } @@ -9956,10 +14567,10 @@ vm_map_region_count_obj_refs( */ void vm_map_simplify_entry( - vm_map_t map, - vm_map_entry_t this_entry) + vm_map_t map, + vm_map_entry_t this_entry) { - vm_map_entry_t prev_entry; + vm_map_entry_t prev_entry; counter(c_vm_map_simplify_entry_called++); @@ -9971,41 +14582,62 @@ vm_map_simplify_entry( (prev_entry->vme_end == this_entry->vme_start) && (prev_entry->is_sub_map == this_entry->is_sub_map) && + (VME_OBJECT(prev_entry) == VME_OBJECT(this_entry)) && + ((VME_OFFSET(prev_entry) + (prev_entry->vme_end - + prev_entry->vme_start)) + == VME_OFFSET(this_entry)) && - (prev_entry->object.vm_object == this_entry->object.vm_object) && - ((prev_entry->offset + (prev_entry->vme_end - - prev_entry->vme_start)) - == this_entry->offset) && - - (prev_entry->inheritance == this_entry->inheritance) && + (prev_entry->behavior == this_entry->behavior) && + (prev_entry->needs_copy == this_entry->needs_copy) && (prev_entry->protection == this_entry->protection) && (prev_entry->max_protection == this_entry->max_protection) && - (prev_entry->behavior == this_entry->behavior) && - (prev_entry->alias == this_entry->alias) && - (prev_entry->zero_wired_pages == this_entry->zero_wired_pages) && + (prev_entry->inheritance == this_entry->inheritance) && + (prev_entry->use_pmap == this_entry->use_pmap) && + (VME_ALIAS(prev_entry) == VME_ALIAS(this_entry)) && (prev_entry->no_cache == this_entry->no_cache) && + (prev_entry->permanent == this_entry->permanent) && + (prev_entry->map_aligned == this_entry->map_aligned) && + (prev_entry->zero_wired_pages == this_entry->zero_wired_pages) && + (prev_entry->used_for_jit == this_entry->used_for_jit) && + (prev_entry->pmap_cs_associated == this_entry->pmap_cs_associated) && + /* from_reserved_zone: OK if that field doesn't match */ + (prev_entry->iokit_acct == this_entry->iokit_acct) && + 
(prev_entry->vme_resilient_codesign == + this_entry->vme_resilient_codesign) && + (prev_entry->vme_resilient_media == + this_entry->vme_resilient_media) && + (prev_entry->vme_no_copy_on_read == this_entry->vme_no_copy_on_read) && + (prev_entry->wired_count == this_entry->wired_count) && (prev_entry->user_wired_count == this_entry->user_wired_count) && - (prev_entry->needs_copy == this_entry->needs_copy) && - (prev_entry->permanent == this_entry->permanent) && - - (prev_entry->use_pmap == FALSE) && - (this_entry->use_pmap == FALSE) && + ((prev_entry->vme_atomic == FALSE) && (this_entry->vme_atomic == FALSE)) && (prev_entry->in_transition == FALSE) && (this_entry->in_transition == FALSE) && (prev_entry->needs_wakeup == FALSE) && (this_entry->needs_wakeup == FALSE) && (prev_entry->is_shared == FALSE) && - (this_entry->is_shared == FALSE) - ) { - _vm_map_store_entry_unlink(&map->hdr, prev_entry); + (this_entry->is_shared == FALSE) && + (prev_entry->superpage_size == FALSE) && + (this_entry->superpage_size == FALSE) + ) { + vm_map_store_entry_unlink(map, prev_entry); + assert(prev_entry->vme_start < this_entry->vme_end); + if (prev_entry->map_aligned) { + assert(VM_MAP_PAGE_ALIGNED(prev_entry->vme_start, + VM_MAP_PAGE_MASK(map))); + } this_entry->vme_start = prev_entry->vme_start; - this_entry->offset = prev_entry->offset; + VME_OFFSET_SET(this_entry, VME_OFFSET(prev_entry)); + + if (map->holelistenabled) { + vm_map_store_update_first_free(map, this_entry, TRUE); + } + if (prev_entry->is_sub_map) { - vm_map_deallocate(prev_entry->object.sub_map); + vm_map_deallocate(VME_SUBMAP(prev_entry)); } else { - vm_object_deallocate(prev_entry->object.vm_object); + vm_object_deallocate(VME_OBJECT(prev_entry)); } vm_map_entry_dispose(map, prev_entry); SAVE_HINT_MAP_WRITE(map, this_entry); @@ -10015,10 +14647,10 @@ vm_map_simplify_entry( void vm_map_simplify( - vm_map_t map, - vm_map_offset_t start) + vm_map_t map, + vm_map_offset_t start) { - vm_map_entry_t this_entry; + vm_map_entry_t this_entry; vm_map_lock(map); if (vm_map_lookup_entry(map, start, &this_entry)) { @@ -10031,11 +14663,11 @@ vm_map_simplify( static void vm_map_simplify_range( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end) { - vm_map_entry_t entry; + vm_map_entry_t entry; /* * The map should be locked (for "write") by the caller. 
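The long compatibility test above is easier to read in isolation: vm_map_simplify_entry() can fold an entry into its predecessor only when the two are contiguous in the address space, reference the same backing object at consecutive offsets, and agree on every per-entry attribute. A minimal sketch of that shape, using a hypothetical simplified entry type rather than the kernel's vm_map_entry:

#include <stdbool.h>
#include <stdint.h>

struct simple_entry {
	uint64_t start, end;      /* virtual address range [start, end) */
	void    *object;          /* backing VM object */
	uint64_t offset;          /* offset of 'start' within that object */
	int      protection;      /* current protection */
	int      max_protection;  /* maximum protection */
	int      inheritance;     /* fork inheritance */
};

/*
 * True when 'prev' could be coalesced into 'next': contiguous virtual
 * addresses, the same object at consecutive offsets, identical attributes.
 * (The real check also compares wiring, aliases, JIT/resilience flags, etc.)
 */
static bool
simple_can_coalesce(const struct simple_entry *prev,
    const struct simple_entry *next)
{
	return prev->end == next->start &&
	       prev->object == next->object &&
	       prev->offset + (prev->end - prev->start) == next->offset &&
	       prev->protection == next->protection &&
	       prev->max_protection == next->max_protection &&
	       prev->inheritance == next->inheritance;
}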
@@ -10046,8 +14678,10 @@ vm_map_simplify_range( return; } - start = vm_map_trunc_page(start); - end = vm_map_round_page(end); + start = vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)); + end = vm_map_round_page(end, + VM_MAP_PAGE_MASK(map)); if (!vm_map_lookup_entry(map, start, &entry)) { /* "start" is not mapped and "entry" ends before "start" */ @@ -10059,9 +14693,9 @@ vm_map_simplify_range( entry = entry->vme_next; } } - + while (entry != vm_map_to_entry(map) && - entry->vme_start <= end) { + entry->vme_start <= end) { /* try and coalesce "entry" with its previous entry */ vm_map_simplify_entry(map, entry); entry = entry->vme_next; @@ -10085,71 +14719,72 @@ vm_map_simplify_range( */ kern_return_t vm_map_machine_attribute( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_machine_attribute_t attribute, - vm_machine_attribute_val_t* value) /* IN/OUT */ + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value) /* IN/OUT */ { - kern_return_t ret; + kern_return_t ret; vm_map_size_t sync_size; vm_map_entry_t entry; - - if (start < vm_map_min(map) || end > vm_map_max(map)) + + if (start < vm_map_min(map) || end > vm_map_max(map)) { return KERN_INVALID_ADDRESS; + } /* Figure how much memory we need to flush (in page increments) */ sync_size = end - start; vm_map_lock(map); - - if (attribute != MATTR_CACHE) { + + if (attribute != MATTR_CACHE) { /* If we don't have to find physical addresses, we */ /* don't have to do an explicit traversal here. */ - ret = pmap_attribute(map->pmap, start, end-start, - attribute, value); + ret = pmap_attribute(map->pmap, start, end - start, + attribute, value); vm_map_unlock(map); return ret; } - ret = KERN_SUCCESS; /* Assume it all worked */ + ret = KERN_SUCCESS; /* Assume it all worked */ - while(sync_size) { + while (sync_size) { if (vm_map_lookup_entry(map, start, &entry)) { - vm_map_size_t sub_size; - if((entry->vme_end - start) > sync_size) { + vm_map_size_t sub_size; + if ((entry->vme_end - start) > sync_size) { sub_size = sync_size; sync_size = 0; } else { sub_size = entry->vme_end - start; sync_size -= sub_size; } - if(entry->is_sub_map) { + if (entry->is_sub_map) { vm_map_offset_t sub_start; vm_map_offset_t sub_end; - sub_start = (start - entry->vme_start) - + entry->offset; + sub_start = (start - entry->vme_start) + + VME_OFFSET(entry); sub_end = sub_start + sub_size; vm_map_machine_attribute( - entry->object.sub_map, + VME_SUBMAP(entry), sub_start, sub_end, attribute, value); } else { - if(entry->object.vm_object) { - vm_page_t m; - vm_object_t object; - vm_object_t base_object; - vm_object_t last_object; - vm_object_offset_t offset; - vm_object_offset_t base_offset; - vm_map_size_t range; + if (VME_OBJECT(entry)) { + vm_page_t m; + vm_object_t object; + vm_object_t base_object; + vm_object_t last_object; + vm_object_offset_t offset; + vm_object_offset_t base_offset; + vm_map_size_t range; range = sub_size; offset = (start - entry->vme_start) - + entry->offset; + + VME_OFFSET(entry); base_offset = offset; - object = entry->object.vm_object; + object = VME_OBJECT(entry); base_object = object; last_object = NULL; @@ -10159,15 +14794,14 @@ vm_map_machine_attribute( m = vm_page_lookup( object, offset); - if (m && !m->fictitious) { - ret = - pmap_attribute_cache_sync( - m->phys_page, - PAGE_SIZE, - attribute, value); - + if (m && !m->vmp_fictitious) { + ret = + pmap_attribute_cache_sync( + VM_PAGE_GET_PHYS_PAGE(m), + PAGE_SIZE, + attribute, value); } else if 
(object->shadow) { - offset = offset + object->vo_shadow_offset; + offset = offset + object->vo_shadow_offset; last_object = object; object = object->shadow; vm_object_lock(last_object->shadow); @@ -10177,7 +14811,7 @@ vm_map_machine_attribute( range -= PAGE_SIZE; if (base_object != object) { - vm_object_unlock(object); + vm_object_unlock(object); vm_object_lock(base_object); object = base_object; } @@ -10193,7 +14827,6 @@ vm_map_machine_attribute( vm_map_unlock(map); return KERN_FAILURE; } - } vm_map_unlock(map); @@ -10206,22 +14839,18 @@ vm_map_machine_attribute( * * Sets the paging reference behavior of the specified address * range in the target map. Paging reference behavior affects - * how pagein operations resulting from faults on the map will be + * how pagein operations resulting from faults on the map will be * clustered. */ -kern_return_t +kern_return_t vm_map_behavior_set( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_behavior_t new_behavior) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_behavior_t new_behavior) { - register vm_map_entry_t entry; - vm_map_entry_t temp_entry; - - XPR(XPR_VM_MAP, - "vm_map_behavior_set, 0x%X start 0x%X end 0x%X behavior %d", - map, start, end, new_behavior, 0); + vm_map_entry_t entry; + vm_map_entry_t temp_entry; if (start > end || start < vm_map_min(map) || @@ -10230,7 +14859,6 @@ vm_map_behavior_set( } switch (new_behavior) { - /* * This first block of behaviors all set a persistent state on the specified * memory range. All we have to do here is to record the desired behavior @@ -10243,10 +14871,10 @@ vm_map_behavior_set( case VM_BEHAVIOR_RSEQNTL: case VM_BEHAVIOR_ZERO_WIRED_PAGES: vm_map_lock(map); - + /* * The entire address range must be valid for the map. - * Note that vm_map_range_check() does a + * Note that vm_map_range_check() does a * vm_map_lookup_entry() internally and returns the * entry containing the start of the address range if * the entire range is valid. @@ -10254,30 +14882,31 @@ vm_map_behavior_set( if (vm_map_range_check(map, start, end, &temp_entry)) { entry = temp_entry; vm_map_clip_start(map, entry, start); - } - else { + } else { vm_map_unlock(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } - + while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { vm_map_clip_end(map, entry, end); - assert(!entry->use_pmap); - - if( new_behavior == VM_BEHAVIOR_ZERO_WIRED_PAGES ) { + if (entry->is_sub_map) { + assert(!entry->use_pmap); + } + + if (new_behavior == VM_BEHAVIOR_ZERO_WIRED_PAGES) { entry->zero_wired_pages = TRUE; } else { entry->behavior = new_behavior; } entry = entry->vme_next; } - + vm_map_unlock(map); break; /* * The rest of these are different from the above in that they cause - * an immediate action to take place as opposed to setting a behavior that + * an immediate action to take place as opposed to setting a behavior that * affects future actions. */ @@ -10299,51 +14928,46 @@ vm_map_behavior_set( case VM_BEHAVIOR_CAN_REUSE: return vm_map_can_reuse(map, start, end); +#if MACH_ASSERT + case VM_BEHAVIOR_PAGEOUT: + return vm_map_pageout(map, start, end); +#endif /* MACH_ASSERT */ + default: - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* * Internals for madvise(MADV_WILLNEED) system call. * - * The present implementation is to do a read-ahead if the mapping corresponds - * to a mapped regular file. 
If it's an anonymous mapping, then we do nothing - * and basically ignore the "advice" (which we are always free to do). + * The implementation is to do:- + * a) read-ahead if the mapping corresponds to a mapped regular file + * b) or, fault in the pages (zero-fill, decompress etc) if it's an anonymous mapping */ static kern_return_t vm_map_willneed( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end -) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end + ) { - vm_map_entry_t entry; - vm_object_t object; - memory_object_t pager; - struct vm_object_fault_info fault_info; - kern_return_t kr; - vm_object_size_t len; - vm_object_offset_t offset; - - /* - * Fill in static values in fault_info. Several fields get ignored by the code - * we call, but we'll fill them in anyway since uninitialized fields are bad - * when it comes to future backwards compatibility. - */ - - fault_info.interruptible = THREAD_UNINT; /* ignored value */ + vm_map_entry_t entry; + vm_object_t object; + memory_object_t pager; + struct vm_object_fault_info fault_info = {}; + kern_return_t kr; + vm_object_size_t len; + vm_object_offset_t offset; + + fault_info.interruptible = THREAD_UNINT; /* ignored value */ fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL; - fault_info.no_cache = FALSE; /* ignored value */ - fault_info.stealth = TRUE; - fault_info.io_sync = FALSE; - fault_info.cs_bypass = FALSE; - fault_info.mark_zf_absent = FALSE; + fault_info.stealth = TRUE; /* * The MADV_WILLNEED operation doesn't require any changes to the @@ -10358,7 +14982,7 @@ vm_map_willneed( * an error. */ - if (! vm_map_range_check(map, start, end, &entry)) { + if (!vm_map_range_check(map, start, end, &entry)) { vm_map_unlock_read(map); return KERN_INVALID_ADDRESS; } @@ -10366,15 +14990,14 @@ vm_map_willneed( /* * Examine each vm_map_entry_t in the range. */ - for (; entry != vm_map_to_entry(map) && start < end; ) { - + for (; entry != vm_map_to_entry(map) && start < end;) { /* * The first time through, the start address could be anywhere * within the vm_map_entry we found. So adjust the offset to * correspond. After that, the offset will always be zero to * correspond to the beginning of the current vm_map_entry. */ - offset = (start - entry->vme_start) + entry->offset; + offset = (start - entry->vme_start) + VME_OFFSET(entry); /* * Set the length so we don't go beyond the end of the @@ -10393,74 +15016,108 @@ vm_map_willneed( len = (vm_size_t) (0 - PAGE_SIZE); } fault_info.cluster_size = (vm_size_t) len; - fault_info.lo_offset = offset; + fault_info.lo_offset = offset; fault_info.hi_offset = offset + len; - fault_info.user_tag = entry->alias; - - /* - * If there's no read permission to this mapping, then just - * skip it. - */ - if ((entry->protection & VM_PROT_READ) == 0) { - entry = entry->vme_next; - start = entry->vme_start; - continue; + fault_info.user_tag = VME_ALIAS(entry); + fault_info.pmap_options = 0; + if (entry->iokit_acct || + (!entry->is_sub_map && !entry->use_pmap)) { + fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT; } /* - * Find the file object backing this map entry. If there is - * none, then we simply ignore the "will need" advice for this - * entry and go on to the next one. + * If the entry is a submap OR there's no read permission + * to this mapping, then just skip it. 
*/ - if ((object = find_vnode_object(entry)) == VM_OBJECT_NULL) { + if ((entry->is_sub_map) || (entry->protection & VM_PROT_READ) == 0) { entry = entry->vme_next; start = entry->vme_start; continue; } - /* - * The data_request() could take a long time, so let's - * release the map lock to avoid blocking other threads. - */ - vm_map_unlock_read(map); + object = VME_OBJECT(entry); - vm_object_paging_begin(object); - pager = object->pager; - vm_object_unlock(object); + if (object == NULL || + (object && object->internal)) { + /* + * Memory range backed by anonymous memory. + */ + vm_size_t region_size = 0, effective_page_size = 0; + vm_map_offset_t addr = 0, effective_page_mask = 0; - /* - * Get the data from the object asynchronously. - * - * Note that memory_object_data_request() places limits on the - * amount of I/O it will do. Regardless of the len we - * specified, it won't do more than MAX_UPL_TRANSFER and it - * silently truncates the len to that size. This isn't - * necessarily bad since madvise shouldn't really be used to - * page in unlimited amounts of data. Other Unix variants - * limit the willneed case as well. If this turns out to be an - * issue for developers, then we can always adjust the policy - * here and still be backwards compatible since this is all - * just "advice". - */ - kr = memory_object_data_request( - pager, - offset + object->paging_offset, - 0, /* ignored */ - VM_PROT_READ, - (memory_object_fault_info_t)&fault_info); + region_size = len; + addr = start; - vm_object_lock(object); - vm_object_paging_end(object); - vm_object_unlock(object); + effective_page_mask = MAX(vm_map_page_mask(current_map()), PAGE_MASK); + effective_page_size = effective_page_mask + 1; - /* - * If we couldn't do the I/O for some reason, just give up on - * the madvise. We still return success to the user since - * madvise isn't supposed to fail when the advice can't be - * taken. - */ - if (kr != KERN_SUCCESS) { - return KERN_SUCCESS; + vm_map_unlock_read(map); + + while (region_size) { + vm_pre_fault( + vm_map_trunc_page(addr, effective_page_mask), + VM_PROT_READ | VM_PROT_WRITE); + + region_size -= effective_page_size; + addr += effective_page_size; + } + } else { + /* + * Find the file object backing this map entry. If there is + * none, then we simply ignore the "will need" advice for this + * entry and go on to the next one. + */ + if ((object = find_vnode_object(entry)) == VM_OBJECT_NULL) { + entry = entry->vme_next; + start = entry->vme_start; + continue; + } + + vm_object_paging_begin(object); + pager = object->pager; + vm_object_unlock(object); + + /* + * The data_request() could take a long time, so let's + * release the map lock to avoid blocking other threads. + */ + vm_map_unlock_read(map); + + /* + * Get the data from the object asynchronously. + * + * Note that memory_object_data_request() places limits on the + * amount of I/O it will do. Regardless of the len we + * specified, it won't do more than MAX_UPL_TRANSFER_BYTES and it + * silently truncates the len to that size. This isn't + * necessarily bad since madvise shouldn't really be used to + * page in unlimited amounts of data. Other Unix variants + * limit the willneed case as well. If this turns out to be an + * issue for developers, then we can always adjust the policy + * here and still be backwards compatible since this is all + * just "advice". 
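For context, the path above is what services madvise(MADV_WILLNEED) on a user mapping, reached through vm_map_behavior_set(VM_BEHAVIOR_WILLNEED). A minimal userspace sketch of the calling side; since this is only advice, the madvise() result is deliberately ignored:

#include <sys/mman.h>
#include <stddef.h>

/*
 * Map 'len' bytes of the file in 'fd' read-only and hint the kernel to
 * start reading them in.  Returns the mapping, or MAP_FAILED on error.
 */
void *
map_and_prefetch(int fd, size_t len)
{
	void *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		return MAP_FAILED;
	}
	(void)madvise(p, len, MADV_WILLNEED); /* read-ahead for file-backed pages */
	return p;
}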
+ */ + kr = memory_object_data_request( + pager, + offset + object->paging_offset, + 0, /* ignored */ + VM_PROT_READ, + (memory_object_fault_info_t)&fault_info); + + vm_object_lock(object); + vm_object_paging_end(object); + vm_object_unlock(object); + + /* + * If we couldn't do the I/O for some reason, just give up on + * the madvise. We still return success to the user since + * madvise isn't supposed to fail when the advice can't be + * taken. + */ + + if (kr != KERN_SUCCESS) { + return KERN_SUCCESS; + } } start += len; @@ -10471,7 +15128,7 @@ vm_map_willneed( /* look up next entry */ vm_map_lock_read(map); - if (! vm_map_lookup_entry(map, start, &entry)) { + if (!vm_map_lookup_entry(map, start, &entry)) { /* * There's a new hole in the address range. */ @@ -10488,52 +15145,94 @@ static boolean_t vm_map_entry_is_reusable( vm_map_entry_t entry) { + /* Only user map entries */ + vm_object_t object; - if (entry->is_shared || - entry->is_sub_map || - entry->in_transition || - entry->protection != VM_PROT_DEFAULT || - entry->max_protection != VM_PROT_ALL || - entry->inheritance != VM_INHERIT_DEFAULT || - entry->no_cache || - entry->permanent || - entry->superpage_size != 0 || - entry->zero_wired_pages || - entry->wired_count != 0 || - entry->user_wired_count != 0) { + if (entry->is_sub_map) { + return FALSE; + } + + switch (VME_ALIAS(entry)) { + case VM_MEMORY_MALLOC: + case VM_MEMORY_MALLOC_SMALL: + case VM_MEMORY_MALLOC_LARGE: + case VM_MEMORY_REALLOC: + case VM_MEMORY_MALLOC_TINY: + case VM_MEMORY_MALLOC_LARGE_REUSABLE: + case VM_MEMORY_MALLOC_LARGE_REUSED: + /* + * This is a malloc() memory region: check if it's still + * in its original state and can be re-used for more + * malloc() allocations. + */ + break; + default: + /* + * Not a malloc() memory region: let the caller decide if + * it's re-usable. + */ + return TRUE; + } + + if (/*entry->is_shared ||*/ + entry->is_sub_map || + entry->in_transition || + entry->protection != VM_PROT_DEFAULT || + entry->max_protection != VM_PROT_ALL || + entry->inheritance != VM_INHERIT_DEFAULT || + entry->no_cache || + entry->permanent || + entry->superpage_size != FALSE || + entry->zero_wired_pages || + entry->wired_count != 0 || + entry->user_wired_count != 0) { return FALSE; } - object = entry->object.vm_object; + object = VME_OBJECT(entry); if (object == VM_OBJECT_NULL) { return TRUE; } - if (object->ref_count == 1 && - object->wired_page_count == 0 && - object->copy == VM_OBJECT_NULL && - object->shadow == VM_OBJECT_NULL && - object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && - object->internal && - !object->true_share && - object->wimg_bits == VM_WIMG_USE_DEFAULT && - !object->code_signed) { + if ( +#if 0 + /* + * Let's proceed even if the VM object is potentially + * shared. + * We check for this later when processing the actual + * VM pages, so the contents will be safe if shared. + * + * But we can still mark this memory region as "reusable" to + * acknowledge that the caller did let us know that the memory + * could be re-used and should not be penalized for holding + * on to it. This allows its "resident size" to not include + * the reusable range. 
+ */ + object->ref_count == 1 && +#endif + object->wired_page_count == 0 && + object->copy == VM_OBJECT_NULL && + object->shadow == VM_OBJECT_NULL && + object->internal && + object->purgable == VM_PURGABLE_DENY && + object->copy_strategy != MEMORY_OBJECT_COPY_DELAY && + !object->true_share && + object->wimg_bits == VM_WIMG_USE_DEFAULT && + !object->code_signed) { return TRUE; } return FALSE; - - } static kern_return_t vm_map_reuse_pages( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end) { - vm_map_entry_t entry; - vm_object_t object; - vm_object_offset_t start_offset, end_offset; + vm_map_entry_t entry; + vm_object_t object; + vm_object_offset_t start_offset, end_offset; /* * The MADV_REUSE operation doesn't require any changes to the @@ -10541,6 +15240,7 @@ vm_map_reuse_pages( */ vm_map_lock_read(map); + assert(map->pmap != kernel_pmap); /* protect alias access */ /* * The madvise semantics require that the address range be fully @@ -10558,11 +15258,11 @@ vm_map_reuse_pages( * Examine each vm_map_entry_t in the range. */ for (; entry != vm_map_to_entry(map) && entry->vme_start < end; - entry = entry->vme_next) { + entry = entry->vme_next) { /* * Sanity check on the VM map entry. */ - if (! vm_map_entry_is_reusable(entry)) { + if (!vm_map_entry_is_reusable(entry)) { vm_map_unlock_read(map); vm_page_stats_reusable.reuse_pages_failure++; return KERN_INVALID_ADDRESS; @@ -10579,18 +15279,19 @@ vm_map_reuse_pages( start_offset = 0; } end_offset = MIN(end, entry->vme_end) - entry->vme_start; - start_offset += entry->offset; - end_offset += entry->offset; + start_offset += VME_OFFSET(entry); + end_offset += VME_OFFSET(entry); - object = entry->object.vm_object; + assert(!entry->is_sub_map); + object = VME_OBJECT(entry); if (object != VM_OBJECT_NULL) { vm_object_lock(object); vm_object_reuse_pages(object, start_offset, end_offset, - TRUE); + TRUE); vm_object_unlock(object); } - if (entry->alias == VM_MEMORY_MALLOC_LARGE_REUSABLE) { + if (VME_ALIAS(entry) == VM_MEMORY_MALLOC_LARGE_REUSABLE) { /* * XXX * We do not hold the VM map exclusively here. @@ -10599,10 +15300,10 @@ vm_map_reuse_pages( * one that can be modified while holding the VM map * "shared". */ - entry->alias = VM_MEMORY_MALLOC_LARGE_REUSED; + VME_ALIAS_SET(entry, VM_MEMORY_MALLOC_LARGE_REUSED); } } - + vm_map_unlock_read(map); vm_page_stats_reusable.reuse_pages_success++; return KERN_SUCCESS; @@ -10611,13 +15312,14 @@ vm_map_reuse_pages( static kern_return_t vm_map_reusable_pages( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end) { - vm_map_entry_t entry; - vm_object_t object; - vm_object_offset_t start_offset, end_offset; + vm_map_entry_t entry; + vm_object_t object; + vm_object_offset_t start_offset, end_offset; + vm_map_offset_t pmap_offset; /* * The MADV_REUSABLE operation doesn't require any changes to the @@ -10625,6 +15327,7 @@ vm_map_reusable_pages( */ vm_map_lock_read(map); + assert(map->pmap != kernel_pmap); /* protect alias access */ /* * The madvise semantics require that the address range be fully @@ -10642,18 +15345,26 @@ vm_map_reusable_pages( * Examine each vm_map_entry_t in the range. */ for (; entry != vm_map_to_entry(map) && entry->vme_start < end; - entry = entry->vme_next) { + entry = entry->vme_next) { int kill_pages = 0; /* * Sanity check on the VM map entry. */ - if (! 
vm_map_entry_is_reusable(entry)) { + if (!vm_map_entry_is_reusable(entry)) { vm_map_unlock_read(map); vm_page_stats_reusable.reusable_pages_failure++; return KERN_INVALID_ADDRESS; } + if (!(entry->protection & VM_PROT_WRITE) && !entry->used_for_jit) { + /* not writable: can't discard contents */ + vm_map_unlock_read(map); + vm_page_stats_reusable.reusable_nonwritable++; + vm_page_stats_reusable.reusable_pages_failure++; + return KERN_PROTECTION_FAILURE; + } + /* * The first time through, the start address could be anywhere * within the vm_map_entry we found. So adjust the offset to @@ -10661,36 +15372,59 @@ vm_map_reusable_pages( */ if (entry->vme_start < start) { start_offset = start - entry->vme_start; + pmap_offset = start; } else { start_offset = 0; + pmap_offset = entry->vme_start; } end_offset = MIN(end, entry->vme_end) - entry->vme_start; - start_offset += entry->offset; - end_offset += entry->offset; + start_offset += VME_OFFSET(entry); + end_offset += VME_OFFSET(entry); - object = entry->object.vm_object; - if (object == VM_OBJECT_NULL) + assert(!entry->is_sub_map); + object = VME_OBJECT(entry); + if (object == VM_OBJECT_NULL) { continue; + } vm_object_lock(object); - if (object->ref_count == 1 && !object->shadow) + if (((object->ref_count == 1) || + (object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC && + object->copy == VM_OBJECT_NULL)) && + object->shadow == VM_OBJECT_NULL && + /* + * "iokit_acct" entries are billed for their virtual size + * (rather than for their resident pages only), so they + * wouldn't benefit from making pages reusable, and it + * would be hard to keep track of pages that are both + * "iokit_acct" and "reusable" in the pmap stats and + * ledgers. + */ + !(entry->iokit_acct || + (!entry->is_sub_map && !entry->use_pmap))) { + if (object->ref_count != 1) { + vm_page_stats_reusable.reusable_shared++; + } kill_pages = 1; - else + } else { kill_pages = -1; + } if (kill_pages != -1) { vm_object_deactivate_pages(object, - start_offset, - end_offset - start_offset, - kill_pages, - TRUE /*reusable_pages*/); + start_offset, + end_offset - start_offset, + kill_pages, + TRUE /*reusable_pages*/, + map->pmap, + pmap_offset); } else { vm_page_stats_reusable.reusable_pages_shared++; } vm_object_unlock(object); - if (entry->alias == VM_MEMORY_MALLOC_LARGE || - entry->alias == VM_MEMORY_MALLOC_LARGE_REUSED) { + if (VME_ALIAS(entry) == VM_MEMORY_MALLOC_LARGE || + VME_ALIAS(entry) == VM_MEMORY_MALLOC_LARGE_REUSED) { /* * XXX * We do not hold the VM map exclusively here. @@ -10699,10 +15433,10 @@ vm_map_reusable_pages( * one that can be modified while holding the VM map * "shared". */ - entry->alias = VM_MEMORY_MALLOC_LARGE_REUSABLE; + VME_ALIAS_SET(entry, VM_MEMORY_MALLOC_LARGE_REUSABLE); } } - + vm_map_unlock_read(map); vm_page_stats_reusable.reusable_pages_success++; return KERN_SUCCESS; @@ -10711,11 +15445,11 @@ vm_map_reusable_pages( static kern_return_t vm_map_can_reuse( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end) { - vm_map_entry_t entry; + vm_map_entry_t entry; /* * The MADV_REUSABLE operation doesn't require any changes to the @@ -10723,6 +15457,7 @@ vm_map_can_reuse( */ vm_map_lock_read(map); + assert(map->pmap != kernel_pmap); /* protect alias access */ /* * The madvise semantics require that the address range be fully @@ -10740,357 +15475,183 @@ vm_map_can_reuse( * Examine each vm_map_entry_t in the range. 
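These reuse entry points back the Darwin-specific madvise() advice values used by malloc-style allocators: marking a range reusable lets the kernel discard its contents and drop it from the task's resident footprint, and marking it reused re-arms the range before the next write. A small userspace sketch, assuming Darwin's MADV_FREE_REUSABLE and MADV_FREE_REUSE advice values from <sys/mman.h>:

#include <sys/mman.h>
#include <stddef.h>

/*
 * Tell the kernel the cached contents of [buf, buf+len) are disposable.
 * The pages stay mapped but may be discarded and no longer count toward
 * the task's resident size (vm_map_reusable_pages above).
 */
static void
cache_release(void *buf, size_t len)
{
	(void)madvise(buf, len, MADV_FREE_REUSABLE);
}

/*
 * Reclaim the same range before refilling the cache
 * (vm_map_reuse_pages above).
 */
static void
cache_reacquire(void *buf, size_t len)
{
	(void)madvise(buf, len, MADV_FREE_REUSE);
}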
*/ for (; entry != vm_map_to_entry(map) && entry->vme_start < end; - entry = entry->vme_next) { + entry = entry->vme_next) { /* * Sanity check on the VM map entry. */ - if (! vm_map_entry_is_reusable(entry)) { + if (!vm_map_entry_is_reusable(entry)) { vm_map_unlock_read(map); vm_page_stats_reusable.can_reuse_failure++; return KERN_INVALID_ADDRESS; } } - + vm_map_unlock_read(map); vm_page_stats_reusable.can_reuse_success++; return KERN_SUCCESS; } - -#include -#if MACH_KDB -#include -#include - -#define printf db_printf - -/* - * Forward declarations for internal functions. - */ -extern void vm_map_links_print( - struct vm_map_links *links); - -extern void vm_map_header_print( - struct vm_map_header *header); - -extern void vm_map_entry_print( - vm_map_entry_t entry); - -extern void vm_follow_entry( - vm_map_entry_t entry); - -extern void vm_follow_map( - vm_map_t map); - -/* - * vm_map_links_print: [ debug ] - */ -void -vm_map_links_print( - struct vm_map_links *links) -{ - iprintf("prev = %08X next = %08X start = %016llX end = %016llX\n", - links->prev, - links->next, - (unsigned long long)links->start, - (unsigned long long)links->end); -} - -/* - * vm_map_header_print: [ debug ] - */ -void -vm_map_header_print( - struct vm_map_header *header) -{ - vm_map_links_print(&header->links); - iprintf("nentries = %08X, %sentries_pageable\n", - header->nentries, - (header->entries_pageable ? "" : "!")); -} - -/* - * vm_follow_entry: [ debug ] - */ -void -vm_follow_entry( - vm_map_entry_t entry) -{ - int shadows; - - iprintf("map entry %08X\n", entry); - - db_indent += 2; - - shadows = vm_follow_object(entry->object.vm_object); - iprintf("Total objects : %d\n",shadows); - - db_indent -= 2; -} - -/* - * vm_map_entry_print: [ debug ] - */ -void -vm_map_entry_print( - register vm_map_entry_t entry) -{ - static const char *inheritance_name[4] = - { "share", "copy", "none", "?"}; - static const char *behavior_name[4] = - { "dflt", "rand", "seqtl", "rseqntl" }; - - iprintf("map entry %08X - prev = %08X next = %08X\n", entry, entry->vme_prev, entry->vme_next); - - db_indent += 2; - - vm_map_links_print(&entry->links); - - iprintf("start = %016llX end = %016llX - prot=%x/%x/%s\n", - (unsigned long long)entry->vme_start, - (unsigned long long)entry->vme_end, - entry->protection, - entry->max_protection, - inheritance_name[(entry->inheritance & 0x3)]); - - iprintf("behavior = %s, wired_count = %d, user_wired_count = %d\n", - behavior_name[(entry->behavior & 0x3)], - entry->wired_count, - entry->user_wired_count); - iprintf("%sin_transition, %sneeds_wakeup\n", - (entry->in_transition ? "" : "!"), - (entry->needs_wakeup ? "" : "!")); - - if (entry->is_sub_map) { - iprintf("submap = %08X - offset = %016llX\n", - entry->object.sub_map, - (unsigned long long)entry->offset); - } else { - iprintf("object = %08X offset = %016llX - ", - entry->object.vm_object, - (unsigned long long)entry->offset); - printf("%sis_shared, %sneeds_copy\n", - (entry->is_shared ? "" : "!"), - (entry->needs_copy ? 
"" : "!")); - } - - db_indent -= 2; -} - -/* - * vm_follow_map: [ debug ] - */ -void -vm_follow_map( - vm_map_t map) -{ - register vm_map_entry_t entry; - - iprintf("task map %08X\n", map); - - db_indent += 2; - - for (entry = vm_map_first_entry(map); - entry && entry != vm_map_to_entry(map); - entry = entry->vme_next) { - vm_follow_entry(entry); - } - - db_indent -= 2; -} - -/* - * vm_map_print: [ debug ] - */ -void -vm_map_print( - db_addr_t inmap) +#if MACH_ASSERT +static kern_return_t +vm_map_pageout( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end) { - register vm_map_entry_t entry; - vm_map_t map; -#if TASK_SWAPPER - char *swstate; -#endif /* TASK_SWAPPER */ - - map = (vm_map_t)(long) - inmap; /* Make sure we have the right type */ - - iprintf("task map %08X\n", map); - - db_indent += 2; + vm_map_entry_t entry; - vm_map_header_print(&map->hdr); - - iprintf("pmap = %08X size = %08X ref = %d hint = %08X first_free = %08X\n", - map->pmap, - map->size, - map->ref_count, - map->hint, - map->first_free); + /* + * The MADV_PAGEOUT operation doesn't require any changes to the + * vm_map_entry_t's, so the read lock is sufficient. + */ - iprintf("%swait_for_space, %swiring_required, timestamp = %d\n", - (map->wait_for_space ? "" : "!"), - (map->wiring_required ? "" : "!"), - map->timestamp); + vm_map_lock_read(map); -#if TASK_SWAPPER - switch (map->sw_state) { - case MAP_SW_IN: - swstate = "SW_IN"; - break; - case MAP_SW_OUT: - swstate = "SW_OUT"; - break; - default: - swstate = "????"; - break; - } - iprintf("res = %d, sw_state = %s\n", map->res_count, swstate); -#endif /* TASK_SWAPPER */ + /* + * The madvise semantics require that the address range be fully + * allocated with no holes. Otherwise, we're required to return + * an error. + */ - for (entry = vm_map_first_entry(map); - entry && entry != vm_map_to_entry(map); - entry = entry->vme_next) { - vm_map_entry_print(entry); + if (!vm_map_range_check(map, start, end, &entry)) { + vm_map_unlock_read(map); + return KERN_INVALID_ADDRESS; } - db_indent -= 2; -} - -/* - * Routine: vm_map_copy_print - * Purpose: - * Pretty-print a copy object for ddb. - */ - -void -vm_map_copy_print( - db_addr_t incopy) -{ - vm_map_copy_t copy; - vm_map_entry_t entry; - - copy = (vm_map_copy_t)(long) - incopy; /* Make sure we have the right type */ - - printf("copy object 0x%x\n", copy); + /* + * Examine each vm_map_entry_t in the range. + */ + for (; entry != vm_map_to_entry(map) && entry->vme_start < end; + entry = entry->vme_next) { + vm_object_t object; - db_indent += 2; + /* + * Sanity check on the VM map entry. 
+ */ + if (entry->is_sub_map) { + vm_map_t submap; + vm_map_offset_t submap_start; + vm_map_offset_t submap_end; + vm_map_entry_t submap_entry; + + submap = VME_SUBMAP(entry); + submap_start = VME_OFFSET(entry); + submap_end = submap_start + (entry->vme_end - + entry->vme_start); + + vm_map_lock_read(submap); + + if (!vm_map_range_check(submap, + submap_start, + submap_end, + &submap_entry)) { + vm_map_unlock_read(submap); + vm_map_unlock_read(map); + return KERN_INVALID_ADDRESS; + } - iprintf("type=%d", copy->type); - switch (copy->type) { - case VM_MAP_COPY_ENTRY_LIST: - printf("[entry_list]"); - break; - - case VM_MAP_COPY_OBJECT: - printf("[object]"); - break; - - case VM_MAP_COPY_KERNEL_BUFFER: - printf("[kernel_buffer]"); - break; + object = VME_OBJECT(submap_entry); + if (submap_entry->is_sub_map || + object == VM_OBJECT_NULL || + !object->internal) { + vm_map_unlock_read(submap); + continue; + } - default: - printf("[bad type]"); - break; - } - printf(", offset=0x%llx", (unsigned long long)copy->offset); - printf(", size=0x%x\n", copy->size); + vm_object_pageout(object); - switch (copy->type) { - case VM_MAP_COPY_ENTRY_LIST: - vm_map_header_print(&copy->cpy_hdr); - for (entry = vm_map_copy_first_entry(copy); - entry && entry != vm_map_copy_to_entry(copy); - entry = entry->vme_next) { - vm_map_entry_print(entry); + vm_map_unlock_read(submap); + submap = VM_MAP_NULL; + submap_entry = VM_MAP_ENTRY_NULL; + continue; } - break; - - case VM_MAP_COPY_OBJECT: - iprintf("object=0x%x\n", copy->cpy_object); - break; - - case VM_MAP_COPY_KERNEL_BUFFER: - iprintf("kernel buffer=0x%x", copy->cpy_kdata); - printf(", kalloc_size=0x%x\n", copy->cpy_kalloc_size); - break; - } - - db_indent -=2; -} - -/* - * db_vm_map_total_size(map) [ debug ] - * - * return the total virtual size (in bytes) of the map - */ -vm_map_size_t -db_vm_map_total_size( - db_addr_t inmap) -{ - vm_map_entry_t entry; - vm_map_size_t total; - vm_map_t map; - - map = (vm_map_t)(long) - inmap; /* Make sure we have the right type */ + object = VME_OBJECT(entry); + if (entry->is_sub_map || + object == VM_OBJECT_NULL || + !object->internal) { + continue; + } - total = 0; - for (entry = vm_map_first_entry(map); - entry != vm_map_to_entry(map); - entry = entry->vme_next) { - total += entry->vme_end - entry->vme_start; + vm_object_pageout(object); } - return total; + vm_map_unlock_read(map); + return KERN_SUCCESS; } +#endif /* MACH_ASSERT */ -#endif /* MACH_KDB */ /* * Routine: vm_map_entry_insert * - * Descritpion: This routine inserts a new vm_entry in a locked map. + * Description: This routine inserts a new vm_entry in a locked map. 
*/ vm_map_entry_t vm_map_entry_insert( - vm_map_t map, - vm_map_entry_t insp_entry, - vm_map_offset_t start, - vm_map_offset_t end, - vm_object_t object, - vm_object_offset_t offset, - boolean_t needs_copy, - boolean_t is_shared, - boolean_t in_transition, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_behavior_t behavior, - vm_inherit_t inheritance, - unsigned wired_count, - boolean_t no_cache, - boolean_t permanent, - unsigned int superpage_size) -{ - vm_map_entry_t new_entry; + vm_map_t map, + vm_map_entry_t insp_entry, + vm_map_offset_t start, + vm_map_offset_t end, + vm_object_t object, + vm_object_offset_t offset, + boolean_t needs_copy, + boolean_t is_shared, + boolean_t in_transition, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_behavior_t behavior, + vm_inherit_t inheritance, + unsigned wired_count, + boolean_t no_cache, + boolean_t permanent, + boolean_t no_copy_on_read, + unsigned int superpage_size, + boolean_t clear_map_aligned, + boolean_t is_submap, + boolean_t used_for_jit, + int alias) +{ + vm_map_entry_t new_entry; assert(insp_entry != (vm_map_entry_t)0); + vm_map_lock_assert_exclusive(map); + +#if DEVELOPMENT || DEBUG + vm_object_offset_t end_offset = 0; + assertf(!os_add_overflow(end - start, offset, &end_offset), "size 0x%llx, offset 0x%llx caused overflow", (uint64_t)(end - start), offset); +#endif /* DEVELOPMENT || DEBUG */ - new_entry = vm_map_entry_create(map); + new_entry = vm_map_entry_create(map, !map->hdr.entries_pageable); + + if (VM_MAP_PAGE_SHIFT(map) != PAGE_SHIFT) { + new_entry->map_aligned = TRUE; + } else { + new_entry->map_aligned = FALSE; + } + if (clear_map_aligned && + (!VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map)) || + !VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map)))) { + new_entry->map_aligned = FALSE; + } new_entry->vme_start = start; new_entry->vme_end = end; assert(page_aligned(new_entry->vme_start)); assert(page_aligned(new_entry->vme_end)); + if (new_entry->map_aligned) { + assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_start, + VM_MAP_PAGE_MASK(map))); + assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_end, + VM_MAP_PAGE_MASK(map))); + } + assert(new_entry->vme_start < new_entry->vme_end); - new_entry->object.vm_object = object; - new_entry->offset = offset; + VME_OBJECT_SET(new_entry, object); + VME_OFFSET_SET(new_entry, offset); new_entry->is_shared = is_shared; - new_entry->is_sub_map = FALSE; + new_entry->is_sub_map = is_submap; new_entry->needs_copy = needs_copy; new_entry->in_transition = in_transition; new_entry->needs_wakeup = FALSE; @@ -11100,19 +15661,52 @@ vm_map_entry_insert( new_entry->behavior = behavior; new_entry->wired_count = wired_count; new_entry->user_wired_count = 0; - new_entry->use_pmap = FALSE; - new_entry->alias = 0; + if (is_submap) { + /* + * submap: "use_pmap" means "nested". + * default: false. + */ + new_entry->use_pmap = FALSE; + } else { + /* + * object: "use_pmap" means "use pmap accounting" for footprint. + * default: true. 
+ */ + new_entry->use_pmap = TRUE; + } + VME_ALIAS_SET(new_entry, alias); new_entry->zero_wired_pages = FALSE; new_entry->no_cache = no_cache; new_entry->permanent = permanent; - new_entry->superpage_size = superpage_size; - new_entry->used_for_jit = FALSE; + if (superpage_size) { + new_entry->superpage_size = TRUE; + } else { + new_entry->superpage_size = FALSE; + } + if (used_for_jit) { +#if CONFIG_EMBEDDED + if (!(map->jit_entry_exists)) +#endif /* CONFIG_EMBEDDED */ + { + new_entry->used_for_jit = TRUE; + map->jit_entry_exists = TRUE; + } + } else { + new_entry->used_for_jit = FALSE; + } + new_entry->pmap_cs_associated = FALSE; + new_entry->iokit_acct = FALSE; + new_entry->vme_resilient_codesign = FALSE; + new_entry->vme_resilient_media = FALSE; + new_entry->vme_atomic = FALSE; + new_entry->vme_no_copy_on_read = no_copy_on_read; /* * Insert the new entry into the list. */ - vm_map_store_entry_link(map, insp_entry, new_entry); + vm_map_store_entry_link(map, insp_entry, new_entry, + VM_MAP_KERNEL_FLAGS_NONE); map->size += end - start; /* @@ -11130,53 +15724,66 @@ vm_map_entry_insert( */ static kern_return_t vm_map_remap_extract( - vm_map_t map, - vm_map_offset_t addr, - vm_map_size_t size, - boolean_t copy, - struct vm_map_header *map_header, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, + vm_map_t map, + vm_map_offset_t addr, + vm_map_size_t size, + boolean_t copy, + struct vm_map_header *map_header, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, /* What, no behavior? */ - vm_inherit_t inheritance, - boolean_t pageable) -{ - kern_return_t result; - vm_map_size_t mapped_size; - vm_map_size_t tmp_size; - vm_map_entry_t src_entry; /* result of last map lookup */ - vm_map_entry_t new_entry; - vm_object_offset_t offset; - vm_map_offset_t map_address; - vm_map_offset_t src_start; /* start of entry to map */ - vm_map_offset_t src_end; /* end of region to be mapped */ - vm_object_t object; - vm_map_version_t version; - boolean_t src_needs_copy; - boolean_t new_entry_needs_copy; + vm_inherit_t inheritance, + boolean_t pageable, + boolean_t same_map, + vm_map_kernel_flags_t vmk_flags) +{ + kern_return_t result; + vm_map_size_t mapped_size; + vm_map_size_t tmp_size; + vm_map_entry_t src_entry; /* result of last map lookup */ + vm_map_entry_t new_entry; + vm_object_offset_t offset; + vm_map_offset_t map_address; + vm_map_offset_t src_start; /* start of entry to map */ + vm_map_offset_t src_end; /* end of region to be mapped */ + vm_object_t object; + vm_map_version_t version; + boolean_t src_needs_copy; + boolean_t new_entry_needs_copy; + vm_map_entry_t saved_src_entry; + boolean_t src_entry_was_wired; + vm_prot_t max_prot_for_prot_copy; assert(map != VM_MAP_NULL); - assert(size != 0 && size == vm_map_round_page(size)); + assert(size != 0); + assert(size == vm_map_round_page(size, PAGE_MASK)); assert(inheritance == VM_INHERIT_NONE || - inheritance == VM_INHERIT_COPY || - inheritance == VM_INHERIT_SHARE); + inheritance == VM_INHERIT_COPY || + inheritance == VM_INHERIT_SHARE); /* * Compute start and end of region. */ - src_start = vm_map_trunc_page(addr); - src_end = vm_map_round_page(src_start + size); + src_start = vm_map_trunc_page(addr, PAGE_MASK); + src_end = vm_map_round_page(src_start + size, PAGE_MASK); + /* * Initialize map_header. 
*/ - map_header->links.next = (struct vm_map_entry *)&map_header->links; - map_header->links.prev = (struct vm_map_entry *)&map_header->links; + map_header->links.next = CAST_TO_VM_MAP_ENTRY(&map_header->links); + map_header->links.prev = CAST_TO_VM_MAP_ENTRY(&map_header->links); map_header->nentries = 0; map_header->entries_pageable = pageable; + map_header->page_shift = PAGE_SHIFT; vm_map_store_init( map_header ); + if (copy && vmk_flags.vmkf_remap_prot_copy) { + max_prot_for_prot_copy = *max_protection & VM_PROT_ALL; + } else { + max_prot_for_prot_copy = VM_PROT_NONE; + } *cur_protection = VM_PROT_ALL; *max_protection = VM_PROT_ALL; @@ -11184,18 +15791,18 @@ vm_map_remap_extract( mapped_size = 0; result = KERN_SUCCESS; - /* + /* * The specified source virtual space might correspond to * multiple map entries, need to loop on them. */ vm_map_lock(map); while (mapped_size != size) { - vm_map_size_t entry_size; + vm_map_size_t entry_size; /* * Find the beginning of the region. - */ - if (! vm_map_lookup_entry(map, src_start, &src_entry)) { + */ + if (!vm_map_lookup_entry(map, src_start, &src_entry)) { result = KERN_INVALID_ADDRESS; break; } @@ -11207,24 +15814,61 @@ vm_map_remap_extract( } tmp_size = size - mapped_size; - if (src_end > src_entry->vme_end) + if (src_end > src_entry->vme_end) { tmp_size -= (src_end - src_entry->vme_end); + } entry_size = (vm_map_size_t)(src_entry->vme_end - - src_entry->vme_start); + src_entry->vme_start); - if(src_entry->is_sub_map) { - vm_map_reference(src_entry->object.sub_map); + if (src_entry->is_sub_map) { + vm_map_reference(VME_SUBMAP(src_entry)); object = VM_OBJECT_NULL; } else { - object = src_entry->object.vm_object; + object = VME_OBJECT(src_entry); + if (src_entry->iokit_acct) { + /* + * This entry uses "IOKit accounting". + */ + } else if (object != VM_OBJECT_NULL && + (object->purgable != VM_PURGABLE_DENY || + object->vo_ledger_tag != VM_LEDGER_TAG_NONE)) { + /* + * Purgeable objects have their own accounting: + * no pmap accounting for them. + */ + assertf(!src_entry->use_pmap, + "map=%p src_entry=%p [0x%llx:0x%llx] 0x%x/0x%x %d", + map, + src_entry, + (uint64_t)src_entry->vme_start, + (uint64_t)src_entry->vme_end, + src_entry->protection, + src_entry->max_protection, + VME_ALIAS(src_entry)); + } else { + /* + * Not IOKit or purgeable: + * must be accounted by pmap stats. 
+ */ + assertf(src_entry->use_pmap, + "map=%p src_entry=%p [0x%llx:0x%llx] 0x%x/0x%x %d", + map, + src_entry, + (uint64_t)src_entry->vme_start, + (uint64_t)src_entry->vme_end, + src_entry->protection, + src_entry->max_protection, + VME_ALIAS(src_entry)); + } if (object == VM_OBJECT_NULL) { object = vm_object_allocate(entry_size); - src_entry->offset = 0; - src_entry->object.vm_object = object; + VME_OFFSET_SET(src_entry, 0); + VME_OBJECT_SET(src_entry, object); + assert(src_entry->use_pmap); } else if (object->copy_strategy != - MEMORY_OBJECT_COPY_SYMMETRIC) { + MEMORY_OBJECT_COPY_SYMMETRIC) { /* * We are already using an asymmetric * copy, and therefore we already have @@ -11232,119 +15876,191 @@ vm_map_remap_extract( */ assert(!src_entry->needs_copy); } else if (src_entry->needs_copy || object->shadowed || - (object->internal && !object->true_share && - !src_entry->is_shared && - object->vo_size > entry_size)) { - - vm_object_shadow(&src_entry->object.vm_object, - &src_entry->offset, - entry_size); + (object->internal && !object->true_share && + !src_entry->is_shared && + object->vo_size > entry_size)) { + VME_OBJECT_SHADOW(src_entry, entry_size); + assert(src_entry->use_pmap); if (!src_entry->needs_copy && (src_entry->protection & VM_PROT_WRITE)) { - vm_prot_t prot; + vm_prot_t prot; + + assert(!pmap_has_prot_policy(src_entry->protection)); + + prot = src_entry->protection & ~VM_PROT_WRITE; - prot = src_entry->protection & ~VM_PROT_WRITE; + if (override_nx(map, + VME_ALIAS(src_entry)) + && prot) { + prot |= VM_PROT_EXECUTE; + } - if (override_nx(map, src_entry->alias) && prot) - prot |= VM_PROT_EXECUTE; + assert(!pmap_has_prot_policy(prot)); - if(map->mapped) { + if (map->mapped_in_other_pmaps) { vm_object_pmap_protect( - src_entry->object.vm_object, - src_entry->offset, + VME_OBJECT(src_entry), + VME_OFFSET(src_entry), entry_size, PMAP_NULL, src_entry->vme_start, prot); } else { pmap_protect(vm_map_pmap(map), - src_entry->vme_start, - src_entry->vme_end, - prot); + src_entry->vme_start, + src_entry->vme_end, + prot); } } - object = src_entry->object.vm_object; + object = VME_OBJECT(src_entry); src_entry->needs_copy = FALSE; } vm_object_lock(object); vm_object_reference_locked(object); /* object ref. for new entry */ - if (object->copy_strategy == + if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { - object->copy_strategy = - MEMORY_OBJECT_COPY_DELAY; + object->copy_strategy = + MEMORY_OBJECT_COPY_DELAY; } vm_object_unlock(object); } - offset = src_entry->offset + (src_start - src_entry->vme_start); + offset = (VME_OFFSET(src_entry) + + (src_start - src_entry->vme_start)); - new_entry = _vm_map_entry_create(map_header); + new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable); vm_map_entry_copy(new_entry, src_entry); - new_entry->use_pmap = FALSE; /* clr address space specifics */ + if (new_entry->is_sub_map) { + /* clr address space specifics */ + new_entry->use_pmap = FALSE; + } else if (copy) { + /* + * We're dealing with a copy-on-write operation, + * so the resulting mapping should not inherit the + * original mapping's accounting settings. + * "use_pmap" should be reset to its default (TRUE) + * so that the new mapping gets accounted for in + * the task's memory footprint. 
+ */ + new_entry->use_pmap = TRUE; + } + /* "iokit_acct" was cleared in vm_map_entry_copy() */ + assert(!new_entry->iokit_acct); + + new_entry->map_aligned = FALSE; new_entry->vme_start = map_address; new_entry->vme_end = map_address + tmp_size; - new_entry->inheritance = inheritance; - new_entry->offset = offset; + assert(new_entry->vme_start < new_entry->vme_end); + if (copy && vmk_flags.vmkf_remap_prot_copy) { + /* + * Remapping for vm_map_protect(VM_PROT_COPY) + * to convert a read-only mapping into a + * copy-on-write version of itself but + * with write access: + * keep the original inheritance and add + * VM_PROT_WRITE to the max protection. + */ + new_entry->inheritance = src_entry->inheritance; + new_entry->protection &= max_prot_for_prot_copy; + new_entry->max_protection |= VM_PROT_WRITE; + } else { + new_entry->inheritance = inheritance; + } + VME_OFFSET_SET(new_entry, offset); /* * The new region has to be copied now if required. */ - RestartCopy: +RestartCopy: if (!copy) { + if (src_entry->used_for_jit == TRUE) { + if (same_map) { +#if __APRR_SUPPORTED__ + /* + * Disallow re-mapping of any JIT regions on APRR devices. + */ + result = KERN_PROTECTION_FAILURE; + break; +#endif /* __APRR_SUPPORTED__*/ + } else { +#if CONFIG_EMBEDDED + /* + * Cannot allow an entry describing a JIT + * region to be shared across address spaces. + */ + result = KERN_INVALID_ARGUMENT; + break; +#endif /* CONFIG_EMBEDDED */ + } + } + src_entry->is_shared = TRUE; new_entry->is_shared = TRUE; - if (!(new_entry->is_sub_map)) + if (!(new_entry->is_sub_map)) { new_entry->needs_copy = FALSE; - + } } else if (src_entry->is_sub_map) { /* make this a COW sub_map if not already */ + assert(new_entry->wired_count == 0); new_entry->needs_copy = TRUE; object = VM_OBJECT_NULL; } else if (src_entry->wired_count == 0 && - vm_object_copy_quickly(&new_entry->object.vm_object, - new_entry->offset, - (new_entry->vme_end - - new_entry->vme_start), - &src_needs_copy, - &new_entry_needs_copy)) { - + vm_object_copy_quickly(VME_OBJECT_PTR(new_entry), + VME_OFFSET(new_entry), + (new_entry->vme_end - + new_entry->vme_start), + &src_needs_copy, + &new_entry_needs_copy)) { new_entry->needs_copy = new_entry_needs_copy; new_entry->is_shared = FALSE; + assertf(new_entry->use_pmap, "map %p new_entry %p\n", map, new_entry); /* * Handle copy_on_write semantics. */ if (src_needs_copy && !src_entry->needs_copy) { - vm_prot_t prot; + vm_prot_t prot; + + assert(!pmap_has_prot_policy(src_entry->protection)); prot = src_entry->protection & ~VM_PROT_WRITE; - if (override_nx(map, src_entry->alias) && prot) - prot |= VM_PROT_EXECUTE; + if (override_nx(map, + VME_ALIAS(src_entry)) + && prot) { + prot |= VM_PROT_EXECUTE; + } + + assert(!pmap_has_prot_policy(prot)); vm_object_pmap_protect(object, - offset, - entry_size, - ((src_entry->is_shared - || map->mapped) ? - PMAP_NULL : map->pmap), - src_entry->vme_start, - prot); - + offset, + entry_size, + ((src_entry->is_shared + || map->mapped_in_other_pmaps) ? + PMAP_NULL : map->pmap), + src_entry->vme_start, + prot); + + assert(src_entry->wired_count == 0); src_entry->needs_copy = TRUE; } /* * Throw away the old object reference of the new entry. 
*/ vm_object_deallocate(object); - } else { new_entry->is_shared = FALSE; + assertf(new_entry->use_pmap, "map %p new_entry %p\n", map, new_entry); + + src_entry_was_wired = (src_entry->wired_count > 0); + saved_src_entry = src_entry; + src_entry = VM_MAP_ENTRY_NULL; /* * The map can be safely unlocked since we @@ -11354,30 +16070,38 @@ vm_map_remap_extract( * verification, and unlock the map. */ version.main_timestamp = map->timestamp; - vm_map_unlock(map); /* Increments timestamp once! */ + vm_map_unlock(map); /* Increments timestamp once! */ /* * Perform the copy. */ - if (src_entry->wired_count > 0) { + if (src_entry_was_wired > 0) { vm_object_lock(object); result = vm_object_copy_slowly( object, offset, - entry_size, + (new_entry->vme_end - + new_entry->vme_start), THREAD_UNINT, - &new_entry->object.vm_object); + VME_OBJECT_PTR(new_entry)); - new_entry->offset = 0; + VME_OFFSET_SET(new_entry, 0); new_entry->needs_copy = FALSE; } else { + vm_object_offset_t new_offset; + + new_offset = VME_OFFSET(new_entry); result = vm_object_copy_strategically( object, offset, - entry_size, - &new_entry->object.vm_object, - &new_entry->offset, + (new_entry->vme_end - + new_entry->vme_start), + VME_OBJECT_PTR(new_entry), + &new_offset, &new_entry_needs_copy); + if (new_offset != VME_OFFSET(new_entry)) { + VME_OFFSET_SET(new_entry, new_offset); + } new_entry->needs_copy = new_entry_needs_copy; } @@ -11390,6 +16114,7 @@ vm_map_remap_extract( if (result != KERN_SUCCESS && result != KERN_MEMORY_RESTART_COPY) { _vm_map_entry_dispose(map_header, new_entry); + vm_map_lock(map); break; } @@ -11406,13 +16131,17 @@ vm_map_remap_extract( * Retry the lookup and verify that the * same object/offset are still present. */ - vm_object_deallocate(new_entry-> - object.vm_object); + saved_src_entry = VM_MAP_ENTRY_NULL; + vm_object_deallocate(VME_OBJECT(new_entry)); _vm_map_entry_dispose(map_header, new_entry); - if (result == KERN_MEMORY_RESTART_COPY) + if (result == KERN_MEMORY_RESTART_COPY) { result = KERN_SUCCESS; + } continue; } + /* map hasn't changed: src_entry is still valid */ + src_entry = saved_src_entry; + saved_src_entry = VM_MAP_ENTRY_NULL; if (result == KERN_MEMORY_RESTART_COPY) { vm_object_reference(object); @@ -11421,17 +16150,16 @@ vm_map_remap_extract( } _vm_map_store_entry_link(map_header, - map_header->links.prev, new_entry); + map_header->links.prev, new_entry); /*Protections for submap mapping are irrelevant here*/ - if( !src_entry->is_sub_map ) { + if (!src_entry->is_sub_map) { *cur_protection &= src_entry->protection; *max_protection &= src_entry->max_protection; } map_address += tmp_size; mapped_size += tmp_size; src_start += tmp_size; - } /* end while */ vm_map_unlock(map); @@ -11440,11 +16168,15 @@ vm_map_remap_extract( * Free all allocated elements. 
*/ for (src_entry = map_header->links.next; - src_entry != (struct vm_map_entry *)&map_header->links; - src_entry = new_entry) { + src_entry != CAST_TO_VM_MAP_ENTRY(&map_header->links); + src_entry = new_entry) { new_entry = src_entry->vme_next; _vm_map_store_entry_unlink(map_header, src_entry); - vm_object_deallocate(src_entry->object.vm_object); + if (src_entry->is_sub_map) { + vm_map_deallocate(VME_SUBMAP(src_entry)); + } else { + vm_object_deallocate(VME_OBJECT(src_entry)); + } _vm_map_entry_dispose(map_header, src_entry); } } @@ -11464,47 +16196,88 @@ vm_map_remap_extract( */ kern_return_t vm_map_remap( - vm_map_t target_map, - vm_map_address_t *address, - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_map_t src_map, - vm_map_offset_t memory_address, - boolean_t copy, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance) -{ - kern_return_t result; - vm_map_entry_t entry; - vm_map_entry_t insp_entry = VM_MAP_ENTRY_NULL; - vm_map_entry_t new_entry; - struct vm_map_header map_header; - - if (target_map == VM_MAP_NULL) + vm_map_t target_map, + vm_map_address_t *address, + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + vm_map_t src_map, + vm_map_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance) +{ + kern_return_t result; + vm_map_entry_t entry; + vm_map_entry_t insp_entry = VM_MAP_ENTRY_NULL; + vm_map_entry_t new_entry; + struct vm_map_header map_header; + vm_map_offset_t offset_in_mapping; + + if (target_map == VM_MAP_NULL) { return KERN_INVALID_ARGUMENT; + } switch (inheritance) { case VM_INHERIT_NONE: case VM_INHERIT_COPY: case VM_INHERIT_SHARE: - if (size != 0 && src_map != VM_MAP_NULL) + if (size != 0 && src_map != VM_MAP_NULL) { break; - /*FALL THRU*/ + } + /*FALL THRU*/ default: return KERN_INVALID_ARGUMENT; } - size = vm_map_round_page(size); + /* + * If the user is requesting that we return the address of the + * first byte of the data (rather than the base of the page), + * then we use different rounding semantics: specifically, + * we assume that (memory_address, size) describes a region + * all of whose pages we must cover, rather than a base to be truncated + * down and a size to be added to that base. So we figure out + * the highest page that the requested region includes and make + * sure that the size will cover it. + * + * The key example we're worried about it is of the form: + * + * memory_address = 0x1ff0, size = 0x20 + * + * With the old semantics, we round down the memory_address to 0x1000 + * and round up the size to 0x1000, resulting in our covering *only* + * page 0x1000. With the new semantics, we'd realize that the region covers + * 0x1ff0-0x2010, and compute a size of 0x2000. Thus, we cover both page + * 0x1000 and page 0x2000 in the region we remap. 
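+ *
+ * (Editor's illustrative note, not part of the original diff: with
+ * 4KB pages the example above works out to
+ *     offset_in_mapping = 0x1ff0 - vm_map_trunc_page(0x1ff0) = 0xff0
+ *     size              = vm_map_round_page(0x1ff0 + 0x20 - 0x1000)
+ *                       = vm_map_round_page(0x1010) = 0x2000
+ * which is exactly what the code below computes; on success the caller
+ * later gets *address + 0xff0 back as the data address.)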
+ */ + if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) { + offset_in_mapping = memory_address - vm_map_trunc_page(memory_address, PAGE_MASK); + size = vm_map_round_page(memory_address + size - vm_map_trunc_page(memory_address, PAGE_MASK), PAGE_MASK); + } else { + size = vm_map_round_page(size, PAGE_MASK); + } + if (size == 0) { + return KERN_INVALID_ARGUMENT; + } + + if (flags & VM_FLAGS_RESILIENT_MEDIA) { + /* must be copy-on-write to be "media resilient" */ + if (!copy) { + return KERN_INVALID_ARGUMENT; + } + } result = vm_map_remap_extract(src_map, memory_address, - size, copy, &map_header, - cur_protection, - max_protection, - inheritance, - target_map->hdr. - entries_pageable); + size, copy, &map_header, + cur_protection, + max_protection, + inheritance, + target_map->hdr.entries_pageable, + src_map == target_map, + vmk_flags); if (result != KERN_SUCCESS) { return result; @@ -11514,33 +16287,55 @@ vm_map_remap( * Allocate/check a range of free virtual address * space for the target */ - *address = vm_map_trunc_page(*address); + *address = vm_map_trunc_page(*address, + VM_MAP_PAGE_MASK(target_map)); vm_map_lock(target_map); result = vm_map_remap_range_allocate(target_map, address, size, - mask, flags, &insp_entry); + mask, flags, vmk_flags, tag, + &insp_entry); for (entry = map_header.links.next; - entry != (struct vm_map_entry *)&map_header.links; - entry = new_entry) { + entry != CAST_TO_VM_MAP_ENTRY(&map_header.links); + entry = new_entry) { new_entry = entry->vme_next; _vm_map_store_entry_unlink(&map_header, entry); if (result == KERN_SUCCESS) { + if (flags & VM_FLAGS_RESILIENT_CODESIGN) { + /* no codesigning -> read-only access */ + entry->max_protection = VM_PROT_READ; + entry->protection = VM_PROT_READ; + entry->vme_resilient_codesign = TRUE; + } entry->vme_start += *address; entry->vme_end += *address; - vm_map_store_entry_link(target_map, insp_entry, entry); + assert(!entry->map_aligned); + if ((flags & VM_FLAGS_RESILIENT_MEDIA) && + !entry->is_sub_map && + (VME_OBJECT(entry) == VM_OBJECT_NULL || + VME_OBJECT(entry)->internal)) { + entry->vme_resilient_media = TRUE; + } + vm_map_store_entry_link(target_map, insp_entry, entry, + vmk_flags); insp_entry = entry; } else { if (!entry->is_sub_map) { - vm_object_deallocate(entry->object.vm_object); + vm_object_deallocate(VME_OBJECT(entry)); } else { - vm_map_deallocate(entry->object.sub_map); + vm_map_deallocate(VME_SUBMAP(entry)); } _vm_map_entry_dispose(&map_header, entry); } } - if( target_map->disable_vmentry_reuse == TRUE) { - if( target_map->highest_entry_end < insp_entry->vme_end ){ + if (flags & VM_FLAGS_RESILIENT_CODESIGN) { + *cur_protection = VM_PROT_READ; + *max_protection = VM_PROT_READ; + } + + if (target_map->disable_vmentry_reuse == TRUE) { + assert(!target_map->is_nested_map); + if (target_map->highest_entry_end < insp_entry->vme_end) { target_map->highest_entry_end = insp_entry->vme_end; } } @@ -11548,12 +16343,51 @@ vm_map_remap( if (result == KERN_SUCCESS) { target_map->size += size; SAVE_HINT_MAP_WRITE(target_map, insp_entry); + +#if PMAP_CS + if (*max_protection & VM_PROT_EXECUTE) { + vm_map_address_t region_start = 0, region_size = 0; + struct pmap_cs_code_directory *region_cd = NULL; + vm_map_address_t base = 0; + struct pmap_cs_lookup_results results = {}; + vm_map_size_t page_addr = vm_map_trunc_page(memory_address, PAGE_MASK); + vm_map_size_t assoc_size = vm_map_round_page(memory_address + size - page_addr, PAGE_MASK); + + pmap_cs_lookup(src_map->pmap, memory_address, &results); + region_size = 
results.region_size; + region_start = results.region_start; + region_cd = results.region_cd_entry; + base = results.base; + + if (region_cd != NULL && (page_addr != region_start || assoc_size != region_size)) { + *cur_protection = VM_PROT_READ; + *max_protection = VM_PROT_READ; + printf("mismatched remap of executable range 0x%llx-0x%llx to 0x%llx, " + "region_start 0x%llx, region_size 0x%llx, cd_entry %sNULL, making non-executable.\n", + page_addr, page_addr + assoc_size, *address, + region_start, region_size, + region_cd != NULL ? "not " : "" // Don't leak kernel slide + ); + } + } +#endif } vm_map_unlock(target_map); - if (result == KERN_SUCCESS && target_map->wiring_required) - result = vm_map_wire(target_map, *address, - *address + size, *cur_protection, TRUE); + if (result == KERN_SUCCESS && target_map->wiring_required) { + result = vm_map_wire_kernel(target_map, *address, + *address + size, *cur_protection, VM_KERN_MEMORY_MLOCK, + TRUE); + } + + /* + * If requested, return the address of the data pointed to by the + * request, rather than the base of the resulting page. + */ + if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) { + *address += offset_in_mapping; + } + return result; } @@ -11570,54 +16404,107 @@ vm_map_remap( static kern_return_t vm_map_remap_range_allocate( - vm_map_t map, - vm_map_address_t *address, /* IN/OUT */ - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_map_entry_t *map_entry) /* OUT */ + vm_map_t map, + vm_map_address_t *address, /* IN/OUT */ + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + __unused vm_tag_t tag, + vm_map_entry_t *map_entry) /* OUT */ { - vm_map_entry_t entry; - vm_map_offset_t start; - vm_map_offset_t end; - kern_return_t kr; + vm_map_entry_t entry; + vm_map_offset_t start; + vm_map_offset_t end; + vm_map_offset_t desired_empty_end; + kern_return_t kr; + vm_map_entry_t hole_entry; -StartAgain: ; +StartAgain:; start = *address; - if (flags & VM_FLAGS_ANYWHERE) - { + if (flags & VM_FLAGS_ANYWHERE) { + if (flags & VM_FLAGS_RANDOM_ADDR) { + /* + * Get a random start address. + */ + kr = vm_map_random_address_for_size(map, address, size); + if (kr != KERN_SUCCESS) { + return kr; + } + start = *address; + } + /* * Calculate the first possible address. */ - if (start < map->min_offset) + if (start < map->min_offset) { start = map->min_offset; - if (start > map->max_offset) - return(KERN_NO_SPACE); - + } + if (start > map->max_offset) { + return KERN_NO_SPACE; + } + /* * Look for the first possible address; * if there's already something at this * address, we have to start after it. */ - if( map->disable_vmentry_reuse == TRUE) { + if (map->disable_vmentry_reuse == TRUE) { VM_MAP_HIGHEST_ENTRY(map, entry, start); } else { - assert(first_free_is_valid(map)); - if (start == map->min_offset) { - if ((entry = map->first_free) != vm_map_to_entry(map)) - start = entry->vme_end; + if (map->holelistenabled) { + hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list); + + if (hole_entry == NULL) { + /* + * No more space in the map? 
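+ * A NULL holes list means every address between min_offset and
+ * max_offset is already in use.  (Editor's illustrative note: the
+ * circular holes list is otherwise scanned just below for the first
+ * hole whose start or end lies at or beyond "start"; if the scan
+ * wraps back to the list head without a match, the allocation fails
+ * with KERN_NO_SPACE.)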
+ */ + return KERN_NO_SPACE; + } else { + boolean_t found_hole = FALSE; + + do { + if (hole_entry->vme_start >= start) { + start = hole_entry->vme_start; + found_hole = TRUE; + break; + } + + if (hole_entry->vme_end > start) { + found_hole = TRUE; + break; + } + hole_entry = hole_entry->vme_next; + } while (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)); + + if (found_hole == FALSE) { + return KERN_NO_SPACE; + } + + entry = hole_entry; + } } else { - vm_map_entry_t tmp_entry; - if (vm_map_lookup_entry(map, start, &tmp_entry)) - start = tmp_entry->vme_end; - entry = tmp_entry; + assert(first_free_is_valid(map)); + if (start == map->min_offset) { + if ((entry = map->first_free) != vm_map_to_entry(map)) { + start = entry->vme_end; + } + } else { + vm_map_entry_t tmp_entry; + if (vm_map_lookup_entry(map, start, &tmp_entry)) { + start = tmp_entry->vme_end; + } + entry = tmp_entry; + } } + start = vm_map_round_page(start, + VM_MAP_PAGE_MASK(map)); } - + /* * In any case, the "entry" always precedes * the proposed new region throughout the @@ -11625,7 +16512,7 @@ StartAgain: ; */ while (TRUE) { - register vm_map_entry_t next; + vm_map_entry_t next; /* * Find the end of the proposed new region. @@ -11634,15 +16521,21 @@ StartAgain: ; */ end = ((start + mask) & ~mask); - if (end < start) - return(KERN_NO_SPACE); + end = vm_map_round_page(end, + VM_MAP_PAGE_MASK(map)); + if (end < start) { + return KERN_NO_SPACE; + } start = end; end += size; - if ((end > map->max_offset) || (end < start)) { + /* We want an entire page of empty space, but don't increase the allocation size. */ + desired_empty_end = vm_map_round_page(end, VM_MAP_PAGE_MASK(map)); + + if ((desired_empty_end > map->max_offset) || (desired_empty_end < start)) { if (map->wait_for_space) { if (size <= (map->max_offset - - map->min_offset)) { + map->min_offset)) { assert_wait((event_t) map, THREAD_INTERRUPTIBLE); vm_map_unlock(map); thread_block(THREAD_CONTINUE_NULL); @@ -11650,45 +16543,73 @@ StartAgain: ; goto StartAgain; } } - - return(KERN_NO_SPACE); - } - /* - * If there are no more entries, we must win. - */ + return KERN_NO_SPACE; + } next = entry->vme_next; - if (next == vm_map_to_entry(map)) - break; - /* - * If there is another entry, it must be - * after the end of the potential new region. - */ + if (map->holelistenabled) { + if (entry->vme_end >= desired_empty_end) { + break; + } + } else { + /* + * If there are no more entries, we must win. + * + * OR + * + * If there is another entry, it must be + * after the end of the potential new region. + */ - if (next->vme_start >= end) - break; + if (next == vm_map_to_entry(map)) { + break; + } + + if (next->vme_start >= desired_empty_end) { + break; + } + } /* * Didn't fit -- move to the next entry. */ entry = next; - start = entry->vme_end; + + if (map->holelistenabled) { + if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) { + /* + * Wrapped around + */ + return KERN_NO_SPACE; + } + start = entry->vme_start; + } else { + start = entry->vme_end; + } } + + if (map->holelistenabled) { + if (vm_map_lookup_entry(map, entry->vme_start, &entry)) { + panic("Found an existing entry (%p) instead of potential hole at address: 0x%llx.\n", entry, (unsigned long long)entry->vme_start); + } + } + *address = start; } else { - vm_map_entry_t temp_entry; - + vm_map_entry_t temp_entry; + /* * Verify that: * the address doesn't itself violate * the mask requirement. 
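 *
 * (Editor's illustrative note: "mask" names the low address bits that
 * must be clear in the caller-supplied address, so e.g. a mask of
 * 0x3fff admits only 16KB-aligned starting addresses in the check
 * below.)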
*/ - if ((start & mask) != 0) - return(KERN_NO_SPACE); + if ((start & mask) != 0) { + return KERN_NO_SPACE; + } /* @@ -11700,7 +16621,7 @@ StartAgain: ; if ((start < map->min_offset) || (end > map->max_offset) || (start >= end)) { - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } /* @@ -11709,6 +16630,7 @@ StartAgain: ; */ if (flags & VM_FLAGS_OVERWRITE) { vm_map_t zap_map; + int remove_flags = VM_MAP_REMOVE_SAVE_ENTRIES | VM_MAP_REMOVE_NO_MAP_ALIGN; /* * We use a "zap_map" to avoid having to unlock @@ -11717,19 +16639,24 @@ StartAgain: ; * combination. */ zap_map = vm_map_create(PMAP_NULL, - start, - end - start, - map->hdr.entries_pageable); + start, + end, + map->hdr.entries_pageable); if (zap_map == VM_MAP_NULL) { return KERN_RESOURCE_SHORTAGE; } + vm_map_set_page_shift(zap_map, VM_MAP_PAGE_SHIFT(map)); + vm_map_disable_hole_optimization(zap_map); + if (vmk_flags.vmkf_overwrite_immutable) { + remove_flags |= VM_MAP_REMOVE_IMMUTABLE; + } kr = vm_map_delete(map, start, end, - VM_MAP_REMOVE_SAVE_ENTRIES, - zap_map); + remove_flags, + zap_map); if (kr == KERN_SUCCESS) { vm_map_destroy(zap_map, - VM_MAP_REMOVE_NO_PMAP_CLEANUP); + VM_MAP_REMOVE_NO_PMAP_CLEANUP); zap_map = VM_MAP_NULL; } } @@ -11738,8 +16665,9 @@ StartAgain: ; * ... the starting address isn't allocated */ - if (vm_map_lookup_entry(map, start, &temp_entry)) - return(KERN_NO_SPACE); + if (vm_map_lookup_entry(map, start, &temp_entry)) { + return KERN_NO_SPACE; + } entry = temp_entry; @@ -11749,11 +16677,12 @@ StartAgain: ; */ if ((entry->vme_next != vm_map_to_entry(map)) && - (entry->vme_next->vme_start < end)) - return(KERN_NO_SPACE); + (entry->vme_next->vme_start < end)) { + return KERN_NO_SPACE; + } } *map_entry = entry; - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -11764,11 +16693,11 @@ StartAgain: ; vm_map_t vm_map_switch( - vm_map_t map) + vm_map_t map) { - int mycpu; - thread_t thread = current_thread(); - vm_map_t oldmap = thread->map; + int mycpu; + thread_t thread = current_thread(); + vm_map_t oldmap = thread->map; mp_disable_preemption(); mycpu = cpu_number(); @@ -11779,7 +16708,7 @@ vm_map_switch( PMAP_SWITCH_USER(thread, map, mycpu); mp_enable_preemption(); - return(oldmap); + return oldmap; } @@ -11797,19 +16726,19 @@ vm_map_switch( */ kern_return_t vm_map_write_user( - vm_map_t map, - void *src_p, - vm_map_address_t dst_addr, - vm_size_t size) + vm_map_t map, + void *src_p, + vm_map_address_t dst_addr, + vm_size_t size) { - kern_return_t kr = KERN_SUCCESS; + kern_return_t kr = KERN_SUCCESS; - if(current_map() == map) { + if (current_map() == map) { if (copyout(src_p, dst_addr, size)) { kr = KERN_INVALID_ADDRESS; } } else { - vm_map_t oldmap; + vm_map_t oldmap; /* take on the identity of the target map while doing */ /* the transfer */ @@ -11839,19 +16768,19 @@ vm_map_write_user( */ kern_return_t vm_map_read_user( - vm_map_t map, - vm_map_address_t src_addr, - void *dst_p, - vm_size_t size) + vm_map_t map, + vm_map_address_t src_addr, + void *dst_p, + vm_size_t size) { - kern_return_t kr = KERN_SUCCESS; + kern_return_t kr = KERN_SUCCESS; - if(current_map() == map) { + if (current_map() == map) { if (copyin(src_addr, dst_p, size)) { kr = KERN_INVALID_ADDRESS; } } else { - vm_map_t oldmap; + vm_map_t oldmap; /* take on the identity of the target map while doing */ /* the transfer */ @@ -11861,1345 +16790,3335 @@ vm_map_read_user( if (copyin(src_addr, dst_p, size)) { kr = KERN_INVALID_ADDRESS; } - vm_map_switch(oldmap); - vm_map_deallocate(map); + vm_map_switch(oldmap); + 
vm_map_deallocate(map); + } + return kr; +} + + +/* + * vm_map_check_protection: + * + * Assert that the target map allows the specified + * privilege on the entire address region given. + * The entire region must be allocated. + */ +boolean_t +vm_map_check_protection(vm_map_t map, vm_map_offset_t start, + vm_map_offset_t end, vm_prot_t protection) +{ + vm_map_entry_t entry; + vm_map_entry_t tmp_entry; + + vm_map_lock(map); + + if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) { + vm_map_unlock(map); + return FALSE; + } + + if (!vm_map_lookup_entry(map, start, &tmp_entry)) { + vm_map_unlock(map); + return FALSE; + } + + entry = tmp_entry; + + while (start < end) { + if (entry == vm_map_to_entry(map)) { + vm_map_unlock(map); + return FALSE; + } + + /* + * No holes allowed! + */ + + if (start < entry->vme_start) { + vm_map_unlock(map); + return FALSE; + } + + /* + * Check protection associated with entry. + */ + + if ((entry->protection & protection) != protection) { + vm_map_unlock(map); + return FALSE; + } + + /* go to next entry */ + + start = entry->vme_end; + entry = entry->vme_next; + } + vm_map_unlock(map); + return TRUE; +} + +kern_return_t +vm_map_purgable_control( + vm_map_t map, + vm_map_offset_t address, + vm_purgable_t control, + int *state) +{ + vm_map_entry_t entry; + vm_object_t object; + kern_return_t kr; + boolean_t was_nonvolatile; + + /* + * Vet all the input parameters and current type and state of the + * underlaying object. Return with an error if anything is amiss. + */ + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } + + if (control != VM_PURGABLE_SET_STATE && + control != VM_PURGABLE_GET_STATE && + control != VM_PURGABLE_PURGE_ALL && + control != VM_PURGABLE_SET_STATE_FROM_KERNEL) { + return KERN_INVALID_ARGUMENT; + } + + if (control == VM_PURGABLE_PURGE_ALL) { + vm_purgeable_object_purge_all(); + return KERN_SUCCESS; + } + + if ((control == VM_PURGABLE_SET_STATE || + control == VM_PURGABLE_SET_STATE_FROM_KERNEL) && + (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) || + ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) { + return KERN_INVALID_ARGUMENT; + } + + vm_map_lock_read(map); + + if (!vm_map_lookup_entry(map, address, &entry) || entry->is_sub_map) { + /* + * Must pass a valid non-submap address. + */ + vm_map_unlock_read(map); + return KERN_INVALID_ADDRESS; + } + + if ((entry->protection & VM_PROT_WRITE) == 0) { + /* + * Can't apply purgable controls to something you can't write. + */ + vm_map_unlock_read(map); + return KERN_PROTECTION_FAILURE; + } + + object = VME_OBJECT(entry); + if (object == VM_OBJECT_NULL || + object->purgable == VM_PURGABLE_DENY) { + /* + * Object must already be present and be purgeable. + */ + vm_map_unlock_read(map); + return KERN_INVALID_ARGUMENT; + } + + vm_object_lock(object); + +#if 00 + if (VME_OFFSET(entry) != 0 || + entry->vme_end - entry->vme_start != object->vo_size) { + /* + * Can only apply purgable controls to the whole (existing) + * object at once. 
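+ *
+ * (Editor's illustrative sketch, not part of this diff: a typical
+ * caller of this routine does something along the lines of
+ *     int state = VM_PURGABLE_VOLATILE;
+ *     kr = vm_map_purgable_control(map, addr,
+ *                                  VM_PURGABLE_SET_STATE, &state);
+ * where "map" and "addr" stand in for the caller's target map and the
+ * start address of an existing purgeable mapping.)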
+ */ + vm_map_unlock_read(map); + vm_object_unlock(object); + return KERN_INVALID_ARGUMENT; + } +#endif + + assert(!entry->is_sub_map); + assert(!entry->use_pmap); /* purgeable has its own accounting */ + + vm_map_unlock_read(map); + + was_nonvolatile = (object->purgable == VM_PURGABLE_NONVOLATILE); + + kr = vm_object_purgable_control(object, control, state); + + if (was_nonvolatile && + object->purgable != VM_PURGABLE_NONVOLATILE && + map->pmap == kernel_pmap) { +#if DEBUG + object->vo_purgeable_volatilizer = kernel_task; +#endif /* DEBUG */ + } + + vm_object_unlock(object); + + return kr; +} + +kern_return_t +vm_map_page_query_internal( + vm_map_t target_map, + vm_map_offset_t offset, + int *disposition, + int *ref_count) +{ + kern_return_t kr; + vm_page_info_basic_data_t info; + mach_msg_type_number_t count; + + count = VM_PAGE_INFO_BASIC_COUNT; + kr = vm_map_page_info(target_map, + offset, + VM_PAGE_INFO_BASIC, + (vm_page_info_t) &info, + &count); + if (kr == KERN_SUCCESS) { + *disposition = info.disposition; + *ref_count = info.ref_count; + } else { + *disposition = 0; + *ref_count = 0; + } + + return kr; +} + +kern_return_t +vm_map_page_info( + vm_map_t map, + vm_map_offset_t offset, + vm_page_info_flavor_t flavor, + vm_page_info_t info, + mach_msg_type_number_t *count) +{ + return vm_map_page_range_info_internal(map, + offset, /* start of range */ + (offset + 1), /* this will get rounded in the call to the page boundary */ + flavor, + info, + count); +} + +kern_return_t +vm_map_page_range_info_internal( + vm_map_t map, + vm_map_offset_t start_offset, + vm_map_offset_t end_offset, + vm_page_info_flavor_t flavor, + vm_page_info_t info, + mach_msg_type_number_t *count) +{ + vm_map_entry_t map_entry = VM_MAP_ENTRY_NULL; + vm_object_t object = VM_OBJECT_NULL, curr_object = VM_OBJECT_NULL; + vm_page_t m = VM_PAGE_NULL; + kern_return_t retval = KERN_SUCCESS; + int disposition = 0; + int ref_count = 0; + int depth = 0, info_idx = 0; + vm_page_info_basic_t basic_info = 0; + vm_map_offset_t offset_in_page = 0, offset_in_object = 0, curr_offset_in_object = 0; + vm_map_offset_t start = 0, end = 0, curr_s_offset = 0, curr_e_offset = 0; + boolean_t do_region_footprint; + ledger_amount_t ledger_resident, ledger_compressed; + + switch (flavor) { + case VM_PAGE_INFO_BASIC: + if (*count != VM_PAGE_INFO_BASIC_COUNT) { + /* + * The "vm_page_info_basic_data" structure was not + * properly padded, so allow the size to be off by + * one to maintain backwards binary compatibility... + */ + if (*count != VM_PAGE_INFO_BASIC_COUNT - 1) { + return KERN_INVALID_ARGUMENT; + } + } + break; + default: + return KERN_INVALID_ARGUMENT; + } + + do_region_footprint = task_self_region_footprint(); + disposition = 0; + ref_count = 0; + depth = 0; + info_idx = 0; /* Tracks the next index within the info structure to be filled.*/ + retval = KERN_SUCCESS; + + offset_in_page = start_offset & PAGE_MASK; + start = vm_map_trunc_page(start_offset, PAGE_MASK); + end = vm_map_round_page(end_offset, PAGE_MASK); + + if (end < start) { + return KERN_INVALID_ARGUMENT; + } + + assert((end - start) <= MAX_PAGE_RANGE_QUERY); + + vm_map_lock_read(map); + + task_ledgers_footprint(map->pmap->ledger, &ledger_resident, &ledger_compressed); + + for (curr_s_offset = start; curr_s_offset < end;) { + /* + * New lookup needs reset of these variables. 
+ */ + curr_object = object = VM_OBJECT_NULL; + offset_in_object = 0; + ref_count = 0; + depth = 0; + + if (do_region_footprint && + curr_s_offset >= vm_map_last_entry(map)->vme_end) { + /* + * Request for "footprint" info about a page beyond + * the end of address space: this must be for + * the fake region vm_map_region_recurse_64() + * reported to account for non-volatile purgeable + * memory owned by this task. + */ + disposition = 0; + + if (curr_s_offset - vm_map_last_entry(map)->vme_end <= + (unsigned) ledger_compressed) { + /* + * We haven't reported all the "non-volatile + * compressed" pages yet, so report this fake + * page as "compressed". + */ + disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT; + } else { + /* + * We've reported all the non-volatile + * compressed page but not all the non-volatile + * pages , so report this fake page as + * "resident dirty". + */ + disposition |= VM_PAGE_QUERY_PAGE_PRESENT; + disposition |= VM_PAGE_QUERY_PAGE_DIRTY; + disposition |= VM_PAGE_QUERY_PAGE_REF; + } + switch (flavor) { + case VM_PAGE_INFO_BASIC: + basic_info = (vm_page_info_basic_t) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic))); + basic_info->disposition = disposition; + basic_info->ref_count = 1; + basic_info->object_id = INFO_MAKE_FAKE_OBJECT_ID(map, task_ledgers.purgeable_nonvolatile); + basic_info->offset = 0; + basic_info->depth = 0; + + info_idx++; + break; + } + curr_s_offset += PAGE_SIZE; + continue; + } + + /* + * First, find the map entry covering "curr_s_offset", going down + * submaps if necessary. + */ + if (!vm_map_lookup_entry(map, curr_s_offset, &map_entry)) { + /* no entry -> no object -> no page */ + + if (curr_s_offset < vm_map_min(map)) { + /* + * Illegal address that falls below map min. + */ + curr_e_offset = MIN(end, vm_map_min(map)); + } else if (curr_s_offset >= vm_map_max(map)) { + /* + * Illegal address that falls on/after map max. + */ + curr_e_offset = end; + } else if (map_entry == vm_map_to_entry(map)) { + /* + * Hit a hole. + */ + if (map_entry->vme_next == vm_map_to_entry(map)) { + /* + * Empty map. + */ + curr_e_offset = MIN(map->max_offset, end); + } else { + /* + * Hole at start of the map. + */ + curr_e_offset = MIN(map_entry->vme_next->vme_start, end); + } + } else { + if (map_entry->vme_next == vm_map_to_entry(map)) { + /* + * Hole at the end of the map. 
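+ * (Editor's note: for any such hole the basic-info slots covering it
+ * are simply zero-filled and both the offset cursor and info_idx are
+ * advanced past it, as done just below.)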
+ */ + curr_e_offset = MIN(map->max_offset, end); + } else { + curr_e_offset = MIN(map_entry->vme_next->vme_start, end); + } + } + + assert(curr_e_offset >= curr_s_offset); + + uint64_t num_pages = (curr_e_offset - curr_s_offset) >> PAGE_SHIFT; + + void *info_ptr = (void*) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic))); + + bzero(info_ptr, num_pages * sizeof(struct vm_page_info_basic)); + + curr_s_offset = curr_e_offset; + + info_idx += num_pages; + + continue; + } + + /* compute offset from this map entry's start */ + offset_in_object = curr_s_offset - map_entry->vme_start; + + /* compute offset into this map entry's object (or submap) */ + offset_in_object += VME_OFFSET(map_entry); + + if (map_entry->is_sub_map) { + vm_map_t sub_map = VM_MAP_NULL; + vm_page_info_t submap_info = 0; + vm_map_offset_t submap_s_offset = 0, submap_e_offset = 0, range_len = 0; + + range_len = MIN(map_entry->vme_end, end) - curr_s_offset; + + submap_s_offset = offset_in_object; + submap_e_offset = submap_s_offset + range_len; + + sub_map = VME_SUBMAP(map_entry); + + vm_map_reference(sub_map); + vm_map_unlock_read(map); + + submap_info = (vm_page_info_t) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic))); + + retval = vm_map_page_range_info_internal(sub_map, + submap_s_offset, + submap_e_offset, + VM_PAGE_INFO_BASIC, + (vm_page_info_t) submap_info, + count); + + assert(retval == KERN_SUCCESS); + + vm_map_lock_read(map); + vm_map_deallocate(sub_map); + + /* Move the "info" index by the number of pages we inspected.*/ + info_idx += range_len >> PAGE_SHIFT; + + /* Move our current offset by the size of the range we inspected.*/ + curr_s_offset += range_len; + + continue; + } + + object = VME_OBJECT(map_entry); + if (object == VM_OBJECT_NULL) { + /* + * We don't have an object here and, hence, + * no pages to inspect. We'll fill up the + * info structure appropriately. + */ + + curr_e_offset = MIN(map_entry->vme_end, end); + + uint64_t num_pages = (curr_e_offset - curr_s_offset) >> PAGE_SHIFT; + + void *info_ptr = (void*) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic))); + + bzero(info_ptr, num_pages * sizeof(struct vm_page_info_basic)); + + curr_s_offset = curr_e_offset; + + info_idx += num_pages; + + continue; + } + + if (do_region_footprint) { + int pmap_disp; + + disposition = 0; + pmap_disp = 0; + if (map->has_corpse_footprint) { + /* + * Query the page info data we saved + * while forking the corpse. + */ + vm_map_corpse_footprint_query_page_info( + map, + curr_s_offset, + &pmap_disp); + } else { + /* + * Query the pmap. + */ + pmap_query_page_info(map->pmap, + curr_s_offset, + &pmap_disp); + } + if (object->purgable == VM_PURGABLE_NONVOLATILE && + /* && not tagged as no-footprint? */ + VM_OBJECT_OWNER(object) != NULL && + VM_OBJECT_OWNER(object)->map == map) { + if ((((curr_s_offset + - map_entry->vme_start + + VME_OFFSET(map_entry)) + / PAGE_SIZE) < + (object->resident_page_count + + vm_compressor_pager_get_count(object->pager)))) { + /* + * Non-volatile purgeable object owned + * by this task: report the first + * "#resident + #compressed" pages as + * "resident" (to show that they + * contribute to the footprint) but not + * "dirty" (to avoid double-counting + * with the fake "non-volatile" region + * we'll report at the end of the + * address space to account for all + * (mapped or not) non-volatile memory + * owned by this task. 
+ */ + disposition |= VM_PAGE_QUERY_PAGE_PRESENT; + } + } else if ((object->purgable == VM_PURGABLE_VOLATILE || + object->purgable == VM_PURGABLE_EMPTY) && + /* && not tagged as no-footprint? */ + VM_OBJECT_OWNER(object) != NULL && + VM_OBJECT_OWNER(object)->map == map) { + if ((((curr_s_offset + - map_entry->vme_start + + VME_OFFSET(map_entry)) + / PAGE_SIZE) < + object->wired_page_count)) { + /* + * Volatile|empty purgeable object owned + * by this task: report the first + * "#wired" pages as "resident" (to + * show that they contribute to the + * footprint) but not "dirty" (to avoid + * double-counting with the fake + * "non-volatile" region we'll report + * at the end of the address space to + * account for all (mapped or not) + * non-volatile memory owned by this + * task. + */ + disposition |= VM_PAGE_QUERY_PAGE_PRESENT; + } + } else if (map_entry->iokit_acct && + object->internal && + object->purgable == VM_PURGABLE_DENY) { + /* + * Non-purgeable IOKit memory: phys_footprint + * includes the entire virtual mapping. + */ + assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); + disposition |= VM_PAGE_QUERY_PAGE_PRESENT; + disposition |= VM_PAGE_QUERY_PAGE_DIRTY; + } else if (pmap_disp & (PMAP_QUERY_PAGE_ALTACCT | + PMAP_QUERY_PAGE_COMPRESSED_ALTACCT)) { + /* alternate accounting */ +#if CONFIG_EMBEDDED && (DEVELOPMENT || DEBUG) + if (map->pmap->footprint_was_suspended || + /* + * XXX corpse does not know if original + * pmap had its footprint suspended... + */ + map->has_corpse_footprint) { + /* + * The assertion below can fail if dyld + * suspended footprint accounting + * while doing some adjustments to + * this page; the mapping would say + * "use pmap accounting" but the page + * would be marked "alternate + * accounting". + */ + } else +#endif /* CONFIG_EMBEDDED && (DEVELOPMENT || DEBUG) */ + assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); + pmap_disp = 0; + } else { + if (pmap_disp & PMAP_QUERY_PAGE_PRESENT) { + assertf(map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); + disposition |= VM_PAGE_QUERY_PAGE_PRESENT; + disposition |= VM_PAGE_QUERY_PAGE_REF; + if (pmap_disp & PMAP_QUERY_PAGE_INTERNAL) { + disposition |= VM_PAGE_QUERY_PAGE_DIRTY; + } else { + disposition |= VM_PAGE_QUERY_PAGE_EXTERNAL; + } + if (pmap_disp & PMAP_QUERY_PAGE_REUSABLE) { + disposition |= VM_PAGE_QUERY_PAGE_REUSABLE; + } + } else if (pmap_disp & PMAP_QUERY_PAGE_COMPRESSED) { + assertf(map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); + disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT; + } + } + switch (flavor) { + case VM_PAGE_INFO_BASIC: + basic_info = (vm_page_info_basic_t) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic))); + basic_info->disposition = disposition; + basic_info->ref_count = 1; + basic_info->object_id = INFO_MAKE_FAKE_OBJECT_ID(map, task_ledgers.purgeable_nonvolatile); + basic_info->offset = 0; + basic_info->depth = 0; + + info_idx++; + break; + } + curr_s_offset += PAGE_SIZE; + continue; + } + + vm_object_reference(object); + /* + * Shared mode -- so we can allow other readers + * to grab the lock too. + */ + vm_object_lock_shared(object); + + curr_e_offset = MIN(map_entry->vme_end, end); + + vm_map_unlock_read(map); + + map_entry = NULL; /* map is unlocked, the entry is no longer valid. 
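+ * (Editor's note: from here until the map read lock is retaken below,
+ * only the object reference taken above keeps "object" alive; the
+ * per-page walk that follows descends the shadow chain under shared
+ * object locks and never touches the stale map entry.)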
*/ + + curr_object = object; + + for (; curr_s_offset < curr_e_offset;) { + if (object == curr_object) { + ref_count = curr_object->ref_count - 1; /* account for our object reference above. */ + } else { + ref_count = curr_object->ref_count; + } + + curr_offset_in_object = offset_in_object; + + for (;;) { + m = vm_page_lookup(curr_object, curr_offset_in_object); + + if (m != VM_PAGE_NULL) { + disposition |= VM_PAGE_QUERY_PAGE_PRESENT; + break; + } else { + if (curr_object->internal && + curr_object->alive && + !curr_object->terminating && + curr_object->pager_ready) { + if (VM_COMPRESSOR_PAGER_STATE_GET(curr_object, curr_offset_in_object) + == VM_EXTERNAL_STATE_EXISTS) { + /* the pager has that page */ + disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT; + break; + } + } + + /* + * Go down the VM object shadow chain until we find the page + * we're looking for. + */ + + if (curr_object->shadow != VM_OBJECT_NULL) { + vm_object_t shadow = VM_OBJECT_NULL; + + curr_offset_in_object += curr_object->vo_shadow_offset; + shadow = curr_object->shadow; + + vm_object_lock_shared(shadow); + vm_object_unlock(curr_object); + + curr_object = shadow; + depth++; + continue; + } else { + break; + } + } + } + + /* The ref_count is not strictly accurate, it measures the number */ + /* of entities holding a ref on the object, they may not be mapping */ + /* the object or may not be mapping the section holding the */ + /* target page but its still a ball park number and though an over- */ + /* count, it picks up the copy-on-write cases */ + + /* We could also get a picture of page sharing from pmap_attributes */ + /* but this would under count as only faulted-in mappings would */ + /* show up. */ + + if ((curr_object == object) && curr_object->shadow) { + disposition |= VM_PAGE_QUERY_PAGE_COPIED; + } + + if (!curr_object->internal) { + disposition |= VM_PAGE_QUERY_PAGE_EXTERNAL; + } + + if (m != VM_PAGE_NULL) { + if (m->vmp_fictitious) { + disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS; + } else { + if (m->vmp_dirty || pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m))) { + disposition |= VM_PAGE_QUERY_PAGE_DIRTY; + } + + if (m->vmp_reference || pmap_is_referenced(VM_PAGE_GET_PHYS_PAGE(m))) { + disposition |= VM_PAGE_QUERY_PAGE_REF; + } + + if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) { + disposition |= VM_PAGE_QUERY_PAGE_SPECULATIVE; + } + + if (m->vmp_cs_validated) { + disposition |= VM_PAGE_QUERY_PAGE_CS_VALIDATED; + } + if (m->vmp_cs_tainted) { + disposition |= VM_PAGE_QUERY_PAGE_CS_TAINTED; + } + if (m->vmp_cs_nx) { + disposition |= VM_PAGE_QUERY_PAGE_CS_NX; + } + if (m->vmp_reusable || curr_object->all_reusable) { + disposition |= VM_PAGE_QUERY_PAGE_REUSABLE; + } + } + } + + switch (flavor) { + case VM_PAGE_INFO_BASIC: + basic_info = (vm_page_info_basic_t) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic))); + basic_info->disposition = disposition; + basic_info->ref_count = ref_count; + basic_info->object_id = (vm_object_id_t) (uintptr_t) + VM_KERNEL_ADDRPERM(curr_object); + basic_info->offset = + (memory_object_offset_t) curr_offset_in_object + offset_in_page; + basic_info->depth = depth; + + info_idx++; + break; + } + + disposition = 0; + offset_in_page = 0; // This doesn't really make sense for any offset other than the starting offset. + + /* + * Move to next offset in the range and in our object. 
+ */ + curr_s_offset += PAGE_SIZE; + offset_in_object += PAGE_SIZE; + curr_offset_in_object = offset_in_object; + + if (curr_object != object) { + vm_object_unlock(curr_object); + + curr_object = object; + + vm_object_lock_shared(curr_object); + } else { + vm_object_lock_yield_shared(curr_object); + } + } + + vm_object_unlock(curr_object); + vm_object_deallocate(curr_object); + + vm_map_lock_read(map); + } + + vm_map_unlock_read(map); + return retval; +} + +/* + * vm_map_msync + * + * Synchronises the memory range specified with its backing store + * image by either flushing or cleaning the contents to the appropriate + * memory manager engaging in a memory object synchronize dialog with + * the manager. The client doesn't return until the manager issues + * m_o_s_completed message. MIG Magically converts user task parameter + * to the task's address map. + * + * interpretation of sync_flags + * VM_SYNC_INVALIDATE - discard pages, only return precious + * pages to manager. + * + * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS) + * - discard pages, write dirty or precious + * pages back to memory manager. + * + * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS + * - write dirty or precious pages back to + * the memory manager. + * + * VM_SYNC_CONTIGUOUS - does everything normally, but if there + * is a hole in the region, and we would + * have returned KERN_SUCCESS, return + * KERN_INVALID_ADDRESS instead. + * + * NOTE + * The memory object attributes have not yet been implemented, this + * function will have to deal with the invalidate attribute + * + * RETURNS + * KERN_INVALID_TASK Bad task parameter + * KERN_INVALID_ARGUMENT both sync and async were specified. + * KERN_SUCCESS The usual. + * KERN_INVALID_ADDRESS There was a hole in the region. + */ + +kern_return_t +vm_map_msync( + vm_map_t map, + vm_map_address_t address, + vm_map_size_t size, + vm_sync_t sync_flags) +{ + vm_map_entry_t entry; + vm_map_size_t amount_left; + vm_object_offset_t offset; + boolean_t do_sync_req; + boolean_t had_hole = FALSE; + vm_map_offset_t pmap_offset; + + if ((sync_flags & VM_SYNC_ASYNCHRONOUS) && + (sync_flags & VM_SYNC_SYNCHRONOUS)) { + return KERN_INVALID_ARGUMENT; + } + + /* + * align address and size on page boundaries + */ + size = (vm_map_round_page(address + size, + VM_MAP_PAGE_MASK(map)) - + vm_map_trunc_page(address, + VM_MAP_PAGE_MASK(map))); + address = vm_map_trunc_page(address, + VM_MAP_PAGE_MASK(map)); + + if (map == VM_MAP_NULL) { + return KERN_INVALID_TASK; + } + + if (size == 0) { + return KERN_SUCCESS; + } + + amount_left = size; + + while (amount_left > 0) { + vm_object_size_t flush_size; + vm_object_t object; + + vm_map_lock(map); + if (!vm_map_lookup_entry(map, + address, + &entry)) { + vm_map_size_t skip; + + /* + * hole in the address map. + */ + had_hole = TRUE; + + if (sync_flags & VM_SYNC_KILLPAGES) { + /* + * For VM_SYNC_KILLPAGES, there should be + * no holes in the range, since we couldn't + * prevent someone else from allocating in + * that hole and we wouldn't want to "kill" + * their pages. + */ + vm_map_unlock(map); + break; + } + + /* + * Check for empty map. + */ + if (entry == vm_map_to_entry(map) && + entry->vme_next == entry) { + vm_map_unlock(map); + break; + } + /* + * Check that we don't wrap and that + * we have at least one real map entry. 
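+ * (Editor's note: "had_hole", latched above for this case, is what
+ * feeds the VM_SYNC_CONTIGUOUS behaviour documented in the header
+ * comment: it is converted into KERN_INVALID_ADDRESS at the end of
+ * this routine when that flag was requested.)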
+ */ + if ((map->hdr.nentries == 0) || + (entry->vme_next->vme_start < address)) { + vm_map_unlock(map); + break; + } + /* + * Move up to the next entry if needed + */ + skip = (entry->vme_next->vme_start - address); + if (skip >= amount_left) { + amount_left = 0; + } else { + amount_left -= skip; + } + address = entry->vme_next->vme_start; + vm_map_unlock(map); + continue; + } + + offset = address - entry->vme_start; + pmap_offset = address; + + /* + * do we have more to flush than is contained in this + * entry ? + */ + if (amount_left + entry->vme_start + offset > entry->vme_end) { + flush_size = entry->vme_end - + (entry->vme_start + offset); + } else { + flush_size = amount_left; + } + amount_left -= flush_size; + address += flush_size; + + if (entry->is_sub_map == TRUE) { + vm_map_t local_map; + vm_map_offset_t local_offset; + + local_map = VME_SUBMAP(entry); + local_offset = VME_OFFSET(entry); + vm_map_reference(local_map); + vm_map_unlock(map); + if (vm_map_msync( + local_map, + local_offset, + flush_size, + sync_flags) == KERN_INVALID_ADDRESS) { + had_hole = TRUE; + } + vm_map_deallocate(local_map); + continue; + } + object = VME_OBJECT(entry); + + /* + * We can't sync this object if the object has not been + * created yet + */ + if (object == VM_OBJECT_NULL) { + vm_map_unlock(map); + continue; + } + offset += VME_OFFSET(entry); + + vm_object_lock(object); + + if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) { + int kill_pages = 0; + boolean_t reusable_pages = FALSE; + + if (sync_flags & VM_SYNC_KILLPAGES) { + if (((object->ref_count == 1) || + ((object->copy_strategy != + MEMORY_OBJECT_COPY_SYMMETRIC) && + (object->copy == VM_OBJECT_NULL))) && + (object->shadow == VM_OBJECT_NULL)) { + if (object->ref_count != 1) { + vm_page_stats_reusable.free_shared++; + } + kill_pages = 1; + } else { + kill_pages = -1; + } + } + if (kill_pages != -1) { + vm_object_deactivate_pages( + object, + offset, + (vm_object_size_t) flush_size, + kill_pages, + reusable_pages, + map->pmap, + pmap_offset); + } + vm_object_unlock(object); + vm_map_unlock(map); + continue; + } + /* + * We can't sync this object if there isn't a pager. + * Don't bother to sync internal objects, since there can't + * be any "permanent" storage for these objects anyway. + */ + if ((object->pager == MEMORY_OBJECT_NULL) || + (object->internal) || (object->private)) { + vm_object_unlock(object); + vm_map_unlock(map); + continue; + } + /* + * keep reference on the object until syncing is done + */ + vm_object_reference_locked(object); + vm_object_unlock(object); + + vm_map_unlock(map); + + do_sync_req = vm_object_sync(object, + offset, + flush_size, + sync_flags & VM_SYNC_INVALIDATE, + ((sync_flags & VM_SYNC_SYNCHRONOUS) || + (sync_flags & VM_SYNC_ASYNCHRONOUS)), + sync_flags & VM_SYNC_SYNCHRONOUS); + + if ((sync_flags & VM_SYNC_INVALIDATE) && object->resident_page_count == 0) { + /* + * clear out the clustering and read-ahead hints + */ + vm_object_lock(object); + + object->pages_created = 0; + object->pages_used = 0; + object->sequential = 0; + object->last_alloc = 0; + + vm_object_unlock(object); + } + vm_object_deallocate(object); + } /* while */ + + /* for proper msync() behaviour */ + if (had_hole == TRUE && (sync_flags & VM_SYNC_CONTIGUOUS)) { + return KERN_INVALID_ADDRESS; + } + + return KERN_SUCCESS; +}/* vm_msync */ + +/* + * Routine: convert_port_entry_to_map + * Purpose: + * Convert from a port specifying an entry or a task + * to a map. Doesn't consume the port ref; produces a map ref, + * which may be null. 
Unlike convert_port_to_map, the + * port may be task or a named entry backed. + * Conditions: + * Nothing locked. + */ + + +vm_map_t +convert_port_entry_to_map( + ipc_port_t port) +{ + vm_map_t map; + vm_named_entry_t named_entry; + uint32_t try_failed_count = 0; + + if (IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) { + while (TRUE) { + ip_lock(port); + if (ip_active(port) && (ip_kotype(port) + == IKOT_NAMED_ENTRY)) { + named_entry = + (vm_named_entry_t) ip_get_kobject(port); + if (!(lck_mtx_try_lock(&(named_entry)->Lock))) { + ip_unlock(port); + + try_failed_count++; + mutex_pause(try_failed_count); + continue; + } + named_entry->ref_count++; + lck_mtx_unlock(&(named_entry)->Lock); + ip_unlock(port); + if ((named_entry->is_sub_map) && + (named_entry->protection + & VM_PROT_WRITE)) { + map = named_entry->backing.map; + } else { + mach_destroy_memory_entry(port); + return VM_MAP_NULL; + } + vm_map_reference_swap(map); + mach_destroy_memory_entry(port); + break; + } else { + return VM_MAP_NULL; + } + } + } else { + map = convert_port_to_map(port); + } + + return map; +} + +/* + * Routine: convert_port_entry_to_object + * Purpose: + * Convert from a port specifying a named entry to an + * object. Doesn't consume the port ref; produces a map ref, + * which may be null. + * Conditions: + * Nothing locked. + */ + + +vm_object_t +convert_port_entry_to_object( + ipc_port_t port) +{ + vm_object_t object = VM_OBJECT_NULL; + vm_named_entry_t named_entry; + uint32_t try_failed_count = 0; + + if (IP_VALID(port) && + (ip_kotype(port) == IKOT_NAMED_ENTRY)) { +try_again: + ip_lock(port); + if (ip_active(port) && + (ip_kotype(port) == IKOT_NAMED_ENTRY)) { + named_entry = (vm_named_entry_t) ip_get_kobject(port); + if (!(lck_mtx_try_lock(&(named_entry)->Lock))) { + ip_unlock(port); + try_failed_count++; + mutex_pause(try_failed_count); + goto try_again; + } + named_entry->ref_count++; + lck_mtx_unlock(&(named_entry)->Lock); + ip_unlock(port); + if (!(named_entry->is_sub_map) && + !(named_entry->is_copy) && + (named_entry->protection & VM_PROT_WRITE)) { + object = named_entry->backing.object; + vm_object_reference(object); + } + mach_destroy_memory_entry(port); + } + } + + return object; +} + +/* + * Export routines to other components for the things we access locally through + * macros. + */ +#undef current_map +vm_map_t +current_map(void) +{ + return current_map_fast(); +} + +/* + * vm_map_reference: + * + * Most code internal to the osfmk will go through a + * macro defining this. This is always here for the + * use of other kernel components. + */ +#undef vm_map_reference +void +vm_map_reference( + vm_map_t map) +{ + if (map == VM_MAP_NULL) { + return; + } + + lck_mtx_lock(&map->s_lock); +#if TASK_SWAPPER + assert(map->res_count > 0); + assert(os_ref_get_count(&map->map_refcnt) >= map->res_count); + map->res_count++; +#endif + os_ref_retain_locked(&map->map_refcnt); + lck_mtx_unlock(&map->s_lock); +} + +/* + * vm_map_deallocate: + * + * Removes a reference from the specified map, + * destroying it if no references remain. + * The map should not be locked. 
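+ *
+ * (Editor's note, illustrative: every vm_map_reference() taken above
+ * must be balanced by exactly one vm_map_deallocate(); once the last
+ * reference is dropped, the routine below tears the map down via
+ * vm_map_destroy(map, VM_MAP_REMOVE_NO_FLAGS).)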
+ */ +void +vm_map_deallocate( + vm_map_t map) +{ + unsigned int ref; + + if (map == VM_MAP_NULL) { + return; + } + + lck_mtx_lock(&map->s_lock); + ref = os_ref_release_locked(&map->map_refcnt); + if (ref > 0) { + vm_map_res_deallocate(map); + lck_mtx_unlock(&map->s_lock); + return; + } + assert(os_ref_get_count(&map->map_refcnt) == 0); + lck_mtx_unlock(&map->s_lock); + +#if TASK_SWAPPER + /* + * The map residence count isn't decremented here because + * the vm_map_delete below will traverse the entire map, + * deleting entries, and the residence counts on objects + * and sharing maps will go away then. + */ +#endif + + vm_map_destroy(map, VM_MAP_REMOVE_NO_FLAGS); +} + + +void +vm_map_disable_NX(vm_map_t map) +{ + if (map == NULL) { + return; + } + if (map->pmap == NULL) { + return; + } + + pmap_disable_NX(map->pmap); +} + +void +vm_map_disallow_data_exec(vm_map_t map) +{ + if (map == NULL) { + return; + } + + map->map_disallow_data_exec = TRUE; +} + +/* XXX Consider making these constants (VM_MAX_ADDRESS and MACH_VM_MAX_ADDRESS) + * more descriptive. + */ +void +vm_map_set_32bit(vm_map_t map) +{ +#if defined(__arm__) || defined(__arm64__) + map->max_offset = pmap_max_offset(FALSE, ARM_PMAP_MAX_OFFSET_DEVICE); +#else + map->max_offset = (vm_map_offset_t)VM_MAX_ADDRESS; +#endif +} + + +void +vm_map_set_64bit(vm_map_t map) +{ +#if defined(__arm__) || defined(__arm64__) + map->max_offset = pmap_max_offset(TRUE, ARM_PMAP_MAX_OFFSET_DEVICE); +#else + map->max_offset = (vm_map_offset_t)MACH_VM_MAX_ADDRESS; +#endif +} + +/* + * Expand the maximum size of an existing map to the maximum supported. + */ +void +vm_map_set_jumbo(vm_map_t map) +{ +#if defined (__arm64__) + vm_map_set_max_addr(map, ~0); +#else /* arm64 */ + (void) map; +#endif +} + +/* + * This map has a JIT entitlement + */ +void +vm_map_set_jit_entitled(vm_map_t map) +{ +#if defined (__arm64__) + pmap_set_jit_entitled(map->pmap); +#else /* arm64 */ + (void) map; +#endif +} + +/* + * Expand the maximum size of an existing map. + */ +void +vm_map_set_max_addr(vm_map_t map, vm_map_offset_t new_max_offset) +{ +#if defined(__arm64__) + vm_map_offset_t max_supported_offset = 0; + vm_map_offset_t old_max_offset = map->max_offset; + max_supported_offset = pmap_max_offset(vm_map_is_64bit(map), ARM_PMAP_MAX_OFFSET_JUMBO); + + new_max_offset = trunc_page(new_max_offset); + + /* The address space cannot be shrunk using this routine. */ + if (old_max_offset >= new_max_offset) { + return; + } + + if (max_supported_offset < new_max_offset) { + new_max_offset = max_supported_offset; + } + + map->max_offset = new_max_offset; + + if (map->holes_list->prev->vme_end == old_max_offset) { + /* + * There is already a hole at the end of the map; simply make it bigger. + */ + map->holes_list->prev->vme_end = map->max_offset; + } else { + /* + * There is no hole at the end, so we need to create a new hole + * for the new empty space we're creating. + */ + struct vm_map_links *new_hole = zalloc(vm_map_holes_zone); + new_hole->start = old_max_offset; + new_hole->end = map->max_offset; + new_hole->prev = map->holes_list->prev; + new_hole->next = (struct vm_map_entry *)map->holes_list; + map->holes_list->prev->links.next = (struct vm_map_entry *)new_hole; + map->holes_list->prev = (struct vm_map_entry *)new_hole; + } +#else + (void)map; + (void)new_max_offset; +#endif +} + +vm_map_offset_t +vm_compute_max_offset(boolean_t is64) +{ +#if defined(__arm__) || defined(__arm64__) + return pmap_max_offset(is64, ARM_PMAP_MAX_OFFSET_DEVICE); +#else + return is64 ? 
(vm_map_offset_t)MACH_VM_MAX_ADDRESS : (vm_map_offset_t)VM_MAX_ADDRESS; +#endif +} + +void +vm_map_get_max_aslr_slide_section( + vm_map_t map __unused, + int64_t *max_sections, + int64_t *section_size) +{ +#if defined(__arm64__) + *max_sections = 3; + *section_size = ARM_TT_TWIG_SIZE; +#else + *max_sections = 1; + *section_size = 0; +#endif +} + +uint64_t +vm_map_get_max_aslr_slide_pages(vm_map_t map) +{ +#if defined(__arm64__) + /* Limit arm64 slide to 16MB to conserve contiguous VA space in the more + * limited embedded address space; this is also meant to minimize pmap + * memory usage on 16KB page systems. + */ + return 1 << (24 - VM_MAP_PAGE_SHIFT(map)); +#else + return 1 << (vm_map_is_64bit(map) ? 16 : 8); +#endif +} + +uint64_t +vm_map_get_max_loader_aslr_slide_pages(vm_map_t map) +{ +#if defined(__arm64__) + /* We limit the loader slide to 4MB, in order to ensure at least 8 bits + * of independent entropy on 16KB page systems. + */ + return 1 << (22 - VM_MAP_PAGE_SHIFT(map)); +#else + return 1 << (vm_map_is_64bit(map) ? 16 : 8); +#endif +} + +#ifndef __arm__ +boolean_t +vm_map_is_64bit( + vm_map_t map) +{ + return map->max_offset > ((vm_map_offset_t)VM_MAX_ADDRESS); +} +#endif + +boolean_t +vm_map_has_hard_pagezero( + vm_map_t map, + vm_map_offset_t pagezero_size) +{ + /* + * XXX FBDP + * We should lock the VM map (for read) here but we can get away + * with it for now because there can't really be any race condition: + * the VM map's min_offset is changed only when the VM map is created + * and when the zero page is established (when the binary gets loaded), + * and this routine gets called only when the task terminates and the + * VM map is being torn down, and when a new map is created via + * load_machfile()/execve(). + */ + return map->min_offset >= pagezero_size; +} + +/* + * Raise a VM map's maximun offset. + */ +kern_return_t +vm_map_raise_max_offset( + vm_map_t map, + vm_map_offset_t new_max_offset) +{ + kern_return_t ret; + + vm_map_lock(map); + ret = KERN_INVALID_ADDRESS; + + if (new_max_offset >= map->max_offset) { + if (!vm_map_is_64bit(map)) { + if (new_max_offset <= (vm_map_offset_t)VM_MAX_ADDRESS) { + map->max_offset = new_max_offset; + ret = KERN_SUCCESS; + } + } else { + if (new_max_offset <= (vm_map_offset_t)MACH_VM_MAX_ADDRESS) { + map->max_offset = new_max_offset; + ret = KERN_SUCCESS; + } + } } - return kr; + + vm_map_unlock(map); + return ret; } /* - * vm_map_check_protection: - * - * Assert that the target map allows the specified - * privilege on the entire address region given. - * The entire region must be allocated. + * Raise a VM map's minimum offset. + * To strictly enforce "page zero" reservation. */ -boolean_t -vm_map_check_protection(vm_map_t map, vm_map_offset_t start, - vm_map_offset_t end, vm_prot_t protection) +kern_return_t +vm_map_raise_min_offset( + vm_map_t map, + vm_map_offset_t new_min_offset) { - vm_map_entry_t entry; - vm_map_entry_t tmp_entry; + vm_map_entry_t first_entry; + + new_min_offset = vm_map_round_page(new_min_offset, + VM_MAP_PAGE_MASK(map)); vm_map_lock(map); - if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) - { + if (new_min_offset < map->min_offset) { + /* + * Can't move min_offset backwards, as that would expose + * a part of the address space that was previously, and for + * possibly good reasons, inaccessible. 
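+ *
+ * (Editor's note: raising min_offset is also how the hard "page zero"
+ * reservation checked by vm_map_has_hard_pagezero() above ends up
+ * enforced; once min_offset >= pagezero_size, nothing can be mapped
+ * below that boundary in this map.)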
+ */ + vm_map_unlock(map); + return KERN_INVALID_ADDRESS; + } + if (new_min_offset >= map->max_offset) { + /* can't go beyond the end of the address space */ vm_map_unlock(map); - return (FALSE); + return KERN_INVALID_ADDRESS; } - if (!vm_map_lookup_entry(map, start, &tmp_entry)) { + first_entry = vm_map_first_entry(map); + if (first_entry != vm_map_to_entry(map) && + first_entry->vme_start < new_min_offset) { + /* + * Some memory was already allocated below the new + * minimun offset. It's too late to change it now... + */ vm_map_unlock(map); - return(FALSE); + return KERN_NO_SPACE; } - entry = tmp_entry; + map->min_offset = new_min_offset; - while (start < end) { - if (entry == vm_map_to_entry(map)) { - vm_map_unlock(map); - return(FALSE); - } + assert(map->holes_list); + map->holes_list->start = new_min_offset; + assert(new_min_offset < map->holes_list->end); - /* - * No holes allowed! - */ + vm_map_unlock(map); - if (start < entry->vme_start) { - vm_map_unlock(map); - return(FALSE); - } + return KERN_SUCCESS; +} - /* - * Check protection associated with entry. - */ +/* + * Set the limit on the maximum amount of user wired memory allowed for this map. + * This is basically a copy of the MEMLOCK rlimit value maintained by the BSD side of + * the kernel. The limits are checked in the mach VM side, so we keep a copy so we + * don't have to reach over to the BSD data structures. + */ - if ((entry->protection & protection) != protection) { - vm_map_unlock(map); - return(FALSE); - } +void +vm_map_set_user_wire_limit(vm_map_t map, + vm_size_t limit) +{ + map->user_wire_limit = limit; +} - /* go to next entry */ - start = entry->vme_end; - entry = entry->vme_next; - } +void +vm_map_switch_protect(vm_map_t map, + boolean_t val) +{ + vm_map_lock(map); + map->switch_protect = val; vm_map_unlock(map); - return(TRUE); } +/* + * IOKit has mapped a region into this map; adjust the pmap's ledgers appropriately. + * phys_footprint is a composite limit consisting of iokit + physmem, so we need to + * bump both counters. + */ +void +vm_map_iokit_mapped_region(vm_map_t map, vm_size_t bytes) +{ + pmap_t pmap = vm_map_pmap(map); + + ledger_credit(pmap->ledger, task_ledgers.iokit_mapped, bytes); + ledger_credit(pmap->ledger, task_ledgers.phys_footprint, bytes); +} + +void +vm_map_iokit_unmapped_region(vm_map_t map, vm_size_t bytes) +{ + pmap_t pmap = vm_map_pmap(map); + + ledger_debit(pmap->ledger, task_ledgers.iokit_mapped, bytes); + ledger_debit(pmap->ledger, task_ledgers.phys_footprint, bytes); +} + +/* Add (generate) code signature for memory range */ +#if CONFIG_DYNAMIC_CODE_SIGNING kern_return_t -vm_map_purgable_control( - vm_map_t map, - vm_map_offset_t address, - vm_purgable_t control, - int *state) +vm_map_sign(vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end) { - vm_map_entry_t entry; - vm_object_t object; - kern_return_t kr; + vm_map_entry_t entry; + vm_page_t m; + vm_object_t object; /* * Vet all the input parameters and current type and state of the * underlaying object. Return with an error if anything is amiss. 
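 *
 * (Editor's illustrative summary: under CONFIG_DYNAMIC_CODE_SIGNING
 * this routine walks every resident page in [start, end), marks it
 * cs_validated and pmap_disconnect()s it, so that any later
 * modification attempt is noticed -- see the loop that follows.)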
*/ - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); - - if (control != VM_PURGABLE_SET_STATE && - control != VM_PURGABLE_GET_STATE && - control != VM_PURGABLE_PURGE_ALL) - return(KERN_INVALID_ARGUMENT); - - if (control == VM_PURGABLE_PURGE_ALL) { - vm_purgeable_object_purge_all(); - return KERN_SUCCESS; + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; } - if (control == VM_PURGABLE_SET_STATE && - (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) || - ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) - return(KERN_INVALID_ARGUMENT); - vm_map_lock_read(map); - if (!vm_map_lookup_entry(map, address, &entry) || entry->is_sub_map) { - + if (!vm_map_lookup_entry(map, start, &entry) || entry->is_sub_map) { /* * Must pass a valid non-submap address. */ vm_map_unlock_read(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } - if ((entry->protection & VM_PROT_WRITE) == 0) { + if ((entry->vme_start > start) || (entry->vme_end < end)) { /* - * Can't apply purgable controls to something you can't write. + * Map entry doesn't cover the requested range. Not handling + * this situation currently. */ vm_map_unlock_read(map); - return(KERN_PROTECTION_FAILURE); + return KERN_INVALID_ARGUMENT; } - object = entry->object.vm_object; + object = VME_OBJECT(entry); if (object == VM_OBJECT_NULL) { /* - * Object must already be present or it can't be purgable. + * Object must already be present or we can't sign. */ vm_map_unlock_read(map); return KERN_INVALID_ARGUMENT; } - - vm_object_lock(object); - if (entry->offset != 0 || - entry->vme_end - entry->vme_start != object->vo_size) { - /* - * Can only apply purgable controls to the whole (existing) - * object at once. - */ - vm_map_unlock_read(map); - vm_object_unlock(object); - return KERN_INVALID_ARGUMENT; - } - + vm_object_lock(object); vm_map_unlock_read(map); - kr = vm_object_purgable_control(object, control, state); - - vm_object_unlock(object); - - return kr; -} - -kern_return_t -vm_map_page_query_internal( - vm_map_t target_map, - vm_map_offset_t offset, - int *disposition, - int *ref_count) -{ - kern_return_t kr; - vm_page_info_basic_data_t info; - mach_msg_type_number_t count; - - count = VM_PAGE_INFO_BASIC_COUNT; - kr = vm_map_page_info(target_map, - offset, - VM_PAGE_INFO_BASIC, - (vm_page_info_t) &info, - &count); - if (kr == KERN_SUCCESS) { - *disposition = info.disposition; - *ref_count = info.ref_count; - } else { - *disposition = 0; - *ref_count = 0; - } - - return kr; -} - -kern_return_t -vm_map_page_info( - vm_map_t map, - vm_map_offset_t offset, - vm_page_info_flavor_t flavor, - vm_page_info_t info, - mach_msg_type_number_t *count) -{ - vm_map_entry_t map_entry; - vm_object_t object; - vm_page_t m; - kern_return_t kr; - kern_return_t retval = KERN_SUCCESS; - boolean_t top_object; - int disposition; - int ref_count; - vm_object_id_t object_id; - vm_page_info_basic_t basic_info; - int depth; - vm_map_offset_t offset_in_page; + while (start < end) { + uint32_t refmod; - switch (flavor) { - case VM_PAGE_INFO_BASIC: - if (*count != VM_PAGE_INFO_BASIC_COUNT) { - /* - * The "vm_page_info_basic_data" structure was not - * properly padded, so allow the size to be off by - * one to maintain backwards binary compatibility... - */ - if (*count != VM_PAGE_INFO_BASIC_COUNT - 1) - return KERN_INVALID_ARGUMENT; + m = vm_page_lookup(object, + start - entry->vme_start + VME_OFFSET(entry)); + if (m == VM_PAGE_NULL) { + /* shoud we try to fault a page here? 
we can probably + * demand it exists and is locked for this request */ + vm_object_unlock(object); + return KERN_FAILURE; } - break; - default: - return KERN_INVALID_ARGUMENT; - } - - disposition = 0; - ref_count = 0; - object_id = 0; - top_object = TRUE; - depth = 0; - - retval = KERN_SUCCESS; - offset_in_page = offset & PAGE_MASK; - offset = vm_map_trunc_page(offset); - - vm_map_lock_read(map); - - /* - * First, find the map entry covering "offset", going down - * submaps if necessary. - */ - for (;;) { - if (!vm_map_lookup_entry(map, offset, &map_entry)) { - vm_map_unlock_read(map); - return KERN_INVALID_ADDRESS; + /* deal with special page status */ + if (m->vmp_busy || + (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_private || m->vmp_absent))) { + vm_object_unlock(object); + return KERN_FAILURE; } - /* compute offset from this map entry's start */ - offset -= map_entry->vme_start; - /* compute offset into this map entry's object (or submap) */ - offset += map_entry->offset; - if (map_entry->is_sub_map) { - vm_map_t sub_map; - - sub_map = map_entry->object.sub_map; - vm_map_lock_read(sub_map); - vm_map_unlock_read(map); + /* Page is OK... now "validate" it */ + /* This is the place where we'll call out to create a code + * directory, later */ + m->vmp_cs_validated = TRUE; - map = sub_map; + /* The page is now "clean" for codesigning purposes. That means + * we don't consider it as modified (wpmapped) anymore. But + * we'll disconnect the page so we note any future modification + * attempts. */ + m->vmp_wpmapped = FALSE; + refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); - ref_count = MAX(ref_count, map->ref_count); - continue; + /* Pull the dirty status from the pmap, since we cleared the + * wpmapped bit */ + if ((refmod & VM_MEM_MODIFIED) && !m->vmp_dirty) { + SET_PAGE_DIRTY(m, FALSE); } - break; - } - object = map_entry->object.vm_object; - if (object == VM_OBJECT_NULL) { - /* no object -> no page */ - vm_map_unlock_read(map); - goto done; + /* On to the next page */ + start += PAGE_SIZE; } + vm_object_unlock(object); - vm_object_lock(object); - vm_map_unlock_read(map); - - /* - * Go down the VM object shadow chain until we find the page - * we're looking for. - */ - for (;;) { - ref_count = MAX(ref_count, object->ref_count); - - m = vm_page_lookup(object, offset); - - if (m != VM_PAGE_NULL) { - disposition |= VM_PAGE_QUERY_PAGE_PRESENT; - break; - } else { -#if MACH_PAGEMAP - if (object->existence_map) { - if (vm_external_state_get(object->existence_map, - offset) == - VM_EXTERNAL_STATE_EXISTS) { - /* - * this page has been paged out - */ - disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT; - break; - } - } else + return KERN_SUCCESS; +} #endif - { - if (object->internal && - object->alive && - !object->terminating && - object->pager_ready) { - - memory_object_t pager; - - vm_object_paging_begin(object); - pager = object->pager; - vm_object_unlock(object); - - /* - * Ask the default pager if - * it has this page. 
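 *
 * (Note on vm_map_partial_reap() above: the "zap_map" it creates is the same
 * deferred-teardown idiom used elsewhere in this file.  VM_MAP_REMOVE_SAVE_ENTRIES
 * moves the deleted entries into the zap map instead of tearing them down in
 * place, and the later vm_map_destroy(zap_map, VM_MAP_REMOVE_NO_PMAP_CLEANUP)
 * disposes of them after "map" has been unlocked, so the expensive object
 * cleanup happens outside the critical section.)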
- */ - kr = memory_object_data_request( - pager, - offset + object->paging_offset, - 0, /* just poke the pager */ - VM_PROT_READ, - NULL); - - vm_object_lock(object); - vm_object_paging_end(object); - - if (kr == KERN_SUCCESS) { - /* the default pager has it */ - disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT; - break; - } - } - } - - if (object->shadow != VM_OBJECT_NULL) { - vm_object_t shadow; - - offset += object->vo_shadow_offset; - shadow = object->shadow; - - vm_object_lock(shadow); - vm_object_unlock(object); - - object = shadow; - top_object = FALSE; - depth++; - } else { -// if (!object->internal) -// break; -// retval = KERN_FAILURE; -// goto done_with_object; - break; - } - } - } - /* The ref_count is not strictly accurate, it measures the number */ - /* of entities holding a ref on the object, they may not be mapping */ - /* the object or may not be mapping the section holding the */ - /* target page but its still a ball park number and though an over- */ - /* count, it picks up the copy-on-write cases */ - /* We could also get a picture of page sharing from pmap_attributes */ - /* but this would under count as only faulted-in mappings would */ - /* show up. */ - - if (top_object == TRUE && object->shadow) - disposition |= VM_PAGE_QUERY_PAGE_COPIED; +kern_return_t +vm_map_partial_reap(vm_map_t map, unsigned int *reclaimed_resident, unsigned int *reclaimed_compressed) +{ + vm_map_entry_t entry = VM_MAP_ENTRY_NULL; + vm_map_entry_t next_entry; + kern_return_t kr = KERN_SUCCESS; + vm_map_t zap_map; - if (! object->internal) - disposition |= VM_PAGE_QUERY_PAGE_EXTERNAL; + vm_map_lock(map); - if (m == VM_PAGE_NULL) - goto done_with_object; + /* + * We use a "zap_map" to avoid having to unlock + * the "map" in vm_map_delete(). + */ + zap_map = vm_map_create(PMAP_NULL, + map->min_offset, + map->max_offset, + map->hdr.entries_pageable); - if (m->fictitious) { - disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS; - goto done_with_object; + if (zap_map == VM_MAP_NULL) { + return KERN_RESOURCE_SHORTAGE; } - if (m->dirty || pmap_is_modified(m->phys_page)) - disposition |= VM_PAGE_QUERY_PAGE_DIRTY; - - if (m->reference || pmap_is_referenced(m->phys_page)) - disposition |= VM_PAGE_QUERY_PAGE_REF; - if (m->speculative) - disposition |= VM_PAGE_QUERY_PAGE_SPECULATIVE; + vm_map_set_page_shift(zap_map, + VM_MAP_PAGE_SHIFT(map)); + vm_map_disable_hole_optimization(zap_map); - if (m->cs_validated) - disposition |= VM_PAGE_QUERY_PAGE_CS_VALIDATED; - if (m->cs_tainted) - disposition |= VM_PAGE_QUERY_PAGE_CS_TAINTED; - -done_with_object: - vm_object_unlock(object); -done: + for (entry = vm_map_first_entry(map); + entry != vm_map_to_entry(map); + entry = next_entry) { + next_entry = entry->vme_next; - switch (flavor) { - case VM_PAGE_INFO_BASIC: - basic_info = (vm_page_info_basic_t) info; - basic_info->disposition = disposition; - basic_info->ref_count = ref_count; - basic_info->object_id = (vm_object_id_t) (uintptr_t) object; - basic_info->offset = - (memory_object_offset_t) offset + offset_in_page; - basic_info->depth = depth; - break; + if (VME_OBJECT(entry) && + !entry->is_sub_map && + (VME_OBJECT(entry)->internal == TRUE) && + (VME_OBJECT(entry)->ref_count == 1)) { + *reclaimed_resident += VME_OBJECT(entry)->resident_page_count; + *reclaimed_compressed += vm_compressor_pager_get_count(VME_OBJECT(entry)->pager); + + (void)vm_map_delete(map, + entry->vme_start, + entry->vme_end, + VM_MAP_REMOVE_SAVE_ENTRIES, + zap_map); + } } - return retval; -} - -/* - * vm_map_msync - * - * Synchronises the memory range 
specified with its backing store - * image by either flushing or cleaning the contents to the appropriate - * memory manager engaging in a memory object synchronize dialog with - * the manager. The client doesn't return until the manager issues - * m_o_s_completed message. MIG Magically converts user task parameter - * to the task's address map. - * - * interpretation of sync_flags - * VM_SYNC_INVALIDATE - discard pages, only return precious - * pages to manager. - * - * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS) - * - discard pages, write dirty or precious - * pages back to memory manager. - * - * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS - * - write dirty or precious pages back to - * the memory manager. - * - * VM_SYNC_CONTIGUOUS - does everything normally, but if there - * is a hole in the region, and we would - * have returned KERN_SUCCESS, return - * KERN_INVALID_ADDRESS instead. - * - * NOTE - * The memory object attributes have not yet been implemented, this - * function will have to deal with the invalidate attribute - * - * RETURNS - * KERN_INVALID_TASK Bad task parameter - * KERN_INVALID_ARGUMENT both sync and async were specified. - * KERN_SUCCESS The usual. - * KERN_INVALID_ADDRESS There was a hole in the region. - */ - -kern_return_t -vm_map_msync( - vm_map_t map, - vm_map_address_t address, - vm_map_size_t size, - vm_sync_t sync_flags) -{ - msync_req_t msr; - msync_req_t new_msr; - queue_chain_t req_q; /* queue of requests for this msync */ - vm_map_entry_t entry; - vm_map_size_t amount_left; - vm_object_offset_t offset; - boolean_t do_sync_req; - boolean_t had_hole = FALSE; - memory_object_t pager; - - if ((sync_flags & VM_SYNC_ASYNCHRONOUS) && - (sync_flags & VM_SYNC_SYNCHRONOUS)) - return(KERN_INVALID_ARGUMENT); + vm_map_unlock(map); /* - * align address and size on page boundaries + * Get rid of the "zap_maps" and all the map entries that + * they may still contain. */ - size = vm_map_round_page(address + size) - vm_map_trunc_page(address); - address = vm_map_trunc_page(address); + if (zap_map != VM_MAP_NULL) { + vm_map_destroy(zap_map, VM_MAP_REMOVE_NO_PMAP_CLEANUP); + zap_map = VM_MAP_NULL; + } - if (map == VM_MAP_NULL) - return(KERN_INVALID_TASK); + return kr; +} - if (size == 0) - return(KERN_SUCCESS); - queue_init(&req_q); - amount_left = size; +#if DEVELOPMENT || DEBUG - while (amount_left > 0) { - vm_object_size_t flush_size; - vm_object_t object; +int +vm_map_disconnect_page_mappings( + vm_map_t map, + boolean_t do_unnest) +{ + vm_map_entry_t entry; + int page_count = 0; + if (do_unnest == TRUE) { +#ifndef NO_NESTED_PMAP vm_map_lock(map); - if (!vm_map_lookup_entry(map, - vm_map_trunc_page(address), &entry)) { - vm_map_size_t skip; + for (entry = vm_map_first_entry(map); + entry != vm_map_to_entry(map); + entry = entry->vme_next) { + if (entry->is_sub_map && entry->use_pmap) { + /* + * Make sure the range between the start of this entry and + * the end of this entry is no longer nested, so that + * we will only remove mappings from the pmap in use by this + * this task + */ + vm_map_clip_unnest(map, entry, entry->vme_start, entry->vme_end); + } + } + vm_map_unlock(map); +#endif + } + vm_map_lock_read(map); - /* - * hole in the address map. - */ - had_hole = TRUE; + page_count = map->pmap->stats.resident_count; - /* - * Check for empty map. - */ - if (entry == vm_map_to_entry(map) && - entry->vme_next == entry) { - vm_map_unlock(map); - break; - } - /* - * Check that we don't wrap and that - * we have at least one real map entry. 
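 *
 * For example (illustrative numbers only): msync'ing [0x1000, 0x9000) in a
 * map whose first entry starts at 0x3000 hits this hole case; skip becomes
 * 0x2000, amount_left drops from 0x8000 to 0x6000, and the scan resumes at
 * 0x3000.  The hole is remembered in "had_hole", so if the caller passed
 * VM_SYNC_CONTIGUOUS the final result is KERN_INVALID_ADDRESS even though
 * the rest of the range was processed normally.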
- */ - if ((map->hdr.nentries == 0) || - (entry->vme_next->vme_start < address)) { - vm_map_unlock(map); - break; - } - /* - * Move up to the next entry if needed - */ - skip = (entry->vme_next->vme_start - address); - if (skip >= amount_left) - amount_left = 0; - else - amount_left -= skip; - address = entry->vme_next->vme_start; - vm_map_unlock(map); + for (entry = vm_map_first_entry(map); + entry != vm_map_to_entry(map); + entry = entry->vme_next) { + if (!entry->is_sub_map && ((VME_OBJECT(entry) == 0) || + (VME_OBJECT(entry)->phys_contiguous))) { continue; } + if (entry->is_sub_map) { + assert(!entry->use_pmap); + } - offset = address - entry->vme_start; + pmap_remove_options(map->pmap, entry->vme_start, entry->vme_end, 0); + } + vm_map_unlock_read(map); - /* - * do we have more to flush than is contained in this - * entry ? - */ - if (amount_left + entry->vme_start + offset > entry->vme_end) { - flush_size = entry->vme_end - - (entry->vme_start + offset); - } else { - flush_size = amount_left; - } - amount_left -= flush_size; - address += flush_size; + return page_count; +} - if (entry->is_sub_map == TRUE) { - vm_map_t local_map; - vm_map_offset_t local_offset; +#endif - local_map = entry->object.sub_map; - local_offset = entry->offset; - vm_map_unlock(map); - if (vm_map_msync( - local_map, - local_offset, - flush_size, - sync_flags) == KERN_INVALID_ADDRESS) { - had_hole = TRUE; - } - continue; - } - object = entry->object.vm_object; - /* - * We can't sync this object if the object has not been - * created yet - */ - if (object == VM_OBJECT_NULL) { - vm_map_unlock(map); - continue; - } - offset += entry->offset; +#if CONFIG_FREEZE - vm_object_lock(object); - if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) { - int kill_pages = 0; - boolean_t reusable_pages = FALSE; +int c_freezer_swapout_page_count; +int c_freezer_compression_count = 0; +AbsoluteTime c_freezer_last_yield_ts = 0; - if (sync_flags & VM_SYNC_KILLPAGES) { - if (object->ref_count == 1 && !object->shadow) - kill_pages = 1; - else - kill_pages = -1; - } - if (kill_pages != -1) - vm_object_deactivate_pages(object, offset, - (vm_object_size_t)flush_size, kill_pages, reusable_pages); - vm_object_unlock(object); - vm_map_unlock(map); - continue; - } - /* - * We can't sync this object if there isn't a pager. - * Don't bother to sync internal objects, since there can't - * be any "permanent" storage for these objects anyway. 
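 *
 * (For the VM_SYNC_KILLPAGES path above: pages are actually discarded only
 * when the object is unshared, i.e. ref_count == 1 with no shadow; otherwise
 * kill_pages stays -1 and the entry is skipped, so a copy-on-write or shared
 * mapping is never stripped of pages behind another mapper's back.)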
- */ - if ((object->pager == MEMORY_OBJECT_NULL) || - (object->internal) || (object->private)) { - vm_object_unlock(object); - vm_map_unlock(map); - continue; - } - /* - * keep reference on the object until syncing is done - */ - vm_object_reference_locked(object); - vm_object_unlock(object); +extern unsigned int memorystatus_freeze_private_shared_pages_ratio; +extern unsigned int memorystatus_freeze_shared_mb_per_process_max; - vm_map_unlock(map); +kern_return_t +vm_map_freeze( + task_t task, + unsigned int *purgeable_count, + unsigned int *wired_count, + unsigned int *clean_count, + unsigned int *dirty_count, + unsigned int dirty_budget, + unsigned int *shared_count, + int *freezer_error_code, + boolean_t eval_only) +{ + vm_map_entry_t entry2 = VM_MAP_ENTRY_NULL; + kern_return_t kr = KERN_SUCCESS; + boolean_t evaluation_phase = TRUE; + vm_object_t cur_shared_object = NULL; + int cur_shared_obj_ref_cnt = 0; + unsigned int dirty_private_count = 0, dirty_shared_count = 0, obj_pages_snapshot = 0; - do_sync_req = vm_object_sync(object, - offset, - flush_size, - sync_flags & VM_SYNC_INVALIDATE, - ((sync_flags & VM_SYNC_SYNCHRONOUS) || - (sync_flags & VM_SYNC_ASYNCHRONOUS)), - sync_flags & VM_SYNC_SYNCHRONOUS); - /* - * only send a m_o_s if we returned pages or if the entry - * is writable (ie dirty pages may have already been sent back) - */ - if (!do_sync_req) { - if ((sync_flags & VM_SYNC_INVALIDATE) && object->resident_page_count == 0) { - /* - * clear out the clustering and read-ahead hints - */ - vm_object_lock(object); + *purgeable_count = *wired_count = *clean_count = *dirty_count = *shared_count = 0; - object->pages_created = 0; - object->pages_used = 0; - object->sequential = 0; - object->last_alloc = 0; + /* + * We need the exclusive lock here so that we can + * block any page faults or lookups while we are + * in the middle of freezing this vm map. + */ + vm_map_t map = task->map; - vm_object_unlock(object); - } - vm_object_deallocate(object); - continue; + vm_map_lock(map); + + assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); + + if (vm_compressor_low_on_space() || vm_swap_low_on_space()) { + if (vm_compressor_low_on_space()) { + *freezer_error_code = FREEZER_ERROR_NO_COMPRESSOR_SPACE; } - msync_req_alloc(new_msr); - vm_object_lock(object); - offset += object->paging_offset; + if (vm_swap_low_on_space()) { + *freezer_error_code = FREEZER_ERROR_NO_SWAP_SPACE; + } - new_msr->offset = offset; - new_msr->length = flush_size; - new_msr->object = object; - new_msr->flag = VM_MSYNC_SYNCHRONIZING; - re_iterate: + kr = KERN_NO_SPACE; + goto done; + } + if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE == FALSE) { /* - * We can't sync this object if there isn't a pager. The - * pager can disappear anytime we're not holding the object - * lock. So this has to be checked anytime we goto re_iterate. + * In-memory compressor backing the freezer. No disk. + * So no need to do the evaluation phase. */ + evaluation_phase = FALSE; - pager = object->pager; - - if (pager == MEMORY_OBJECT_NULL) { - vm_object_unlock(object); - vm_object_deallocate(object); - continue; - } - - queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) { + if (eval_only == TRUE) { /* - * need to check for overlapping entry, if found, wait - * on overlapping msr to be done, then reiterate + * We don't support 'eval_only' mode + * in this non-swap config. 
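 *
 * With swap enabled the freeze is two-phase (a summary of the flow below):
 * a first pass with evaluation_phase == TRUE only tallies dirty private vs.
 * shared pages and fails if the shared footprint or the private/shared
 * ratio is unacceptable; the task's purgeable memory is then purged and the
 * walk restarts at "again:" with evaluation_phase == FALSE to do the actual
 * compressor pageout, bounded by dirty_budget.  Callers passing eval_only
 * get just the first pass.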
*/ - msr_lock(msr); - if (msr->flag == VM_MSYNC_SYNCHRONIZING && - ((offset >= msr->offset && - offset < (msr->offset + msr->length)) || - (msr->offset >= offset && - msr->offset < (offset + flush_size)))) - { - assert_wait((event_t) msr,THREAD_INTERRUPTIBLE); - msr_unlock(msr); - vm_object_unlock(object); - thread_block(THREAD_CONTINUE_NULL); - vm_object_lock(object); - goto re_iterate; - } - msr_unlock(msr); - }/* queue_iterate */ - - queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q); + *freezer_error_code = FREEZER_ERROR_GENERIC; + kr = KERN_INVALID_ARGUMENT; + goto done; + } - vm_object_paging_begin(object); - vm_object_unlock(object); + c_freezer_compression_count = 0; + clock_get_uptime(&c_freezer_last_yield_ts); + } +again: - queue_enter(&req_q, new_msr, msync_req_t, req_q); + for (entry2 = vm_map_first_entry(map); + entry2 != vm_map_to_entry(map); + entry2 = entry2->vme_next) { + vm_object_t src_object = VME_OBJECT(entry2); - (void) memory_object_synchronize( - pager, - offset, - flush_size, - sync_flags & ~VM_SYNC_CONTIGUOUS); + if (src_object && + !entry2->is_sub_map && + !src_object->phys_contiguous) { + /* If eligible, scan the entry, moving eligible pages over to our parent object */ - vm_object_lock(object); - vm_object_paging_end(object); - vm_object_unlock(object); - }/* while */ + if (src_object->internal == TRUE) { + if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { + /* + * We skip purgeable objects during evaluation phase only. + * If we decide to freeze this process, we'll explicitly + * purge these objects before we go around again with + * 'evaluation_phase' set to FALSE. + */ - /* - * wait for memory_object_sychronize_completed messages from pager(s) - */ + if ((src_object->purgable == VM_PURGABLE_EMPTY) || (src_object->purgable == VM_PURGABLE_VOLATILE)) { + /* + * We want to purge objects that may not belong to this task but are mapped + * in this task alone. Since we already purged this task's purgeable memory + * at the end of a successful evaluation phase, we want to avoid doing no-op calls + * on this task's purgeable objects. Hence the check for only volatile objects. + */ + if (evaluation_phase == FALSE && + (src_object->purgable == VM_PURGABLE_VOLATILE) && + (src_object->ref_count == 1)) { + vm_object_lock(src_object); + vm_object_purge(src_object, 0); + vm_object_unlock(src_object); + } + continue; + } - while (!queue_empty(&req_q)) { - msr = (msync_req_t)queue_first(&req_q); - msr_lock(msr); - while(msr->flag != VM_MSYNC_DONE) { - assert_wait((event_t) msr, THREAD_INTERRUPTIBLE); - msr_unlock(msr); - thread_block(THREAD_CONTINUE_NULL); - msr_lock(msr); - }/* while */ - queue_remove(&req_q, msr, msync_req_t, req_q); - msr_unlock(msr); - vm_object_deallocate(msr->object); - msync_req_free(msr); - }/* queue_iterate */ + /* + * Pages belonging to this object could be swapped to disk. + * Make sure it's not a shared object because we could end + * up just bringing it back in again. + * + * We try to optimize somewhat by checking for objects that are mapped + * more than once within our own map. But we don't do full searches, + * we just look at the entries following our current entry. 
+ */ - /* for proper msync() behaviour */ - if (had_hole == TRUE && (sync_flags & VM_SYNC_CONTIGUOUS)) - return(KERN_INVALID_ADDRESS); + if (src_object->ref_count > 1) { + if (src_object != cur_shared_object) { + obj_pages_snapshot = (src_object->resident_page_count - src_object->wired_page_count) + vm_compressor_pager_get_count(src_object->pager); + dirty_shared_count += obj_pages_snapshot; - return(KERN_SUCCESS); -}/* vm_msync */ + cur_shared_object = src_object; + cur_shared_obj_ref_cnt = 1; + continue; + } else { + cur_shared_obj_ref_cnt++; + if (src_object->ref_count == cur_shared_obj_ref_cnt) { + /* + * Fall through to below and treat this object as private. + * So deduct its pages from our shared total and add it to the + * private total. + */ + + dirty_shared_count -= obj_pages_snapshot; + dirty_private_count += obj_pages_snapshot; + } else { + continue; + } + } + } -/* - * Routine: convert_port_entry_to_map - * Purpose: - * Convert from a port specifying an entry or a task - * to a map. Doesn't consume the port ref; produces a map ref, - * which may be null. Unlike convert_port_to_map, the - * port may be task or a named entry backed. - * Conditions: - * Nothing locked. - */ + if (src_object->ref_count == 1) { + dirty_private_count += (src_object->resident_page_count - src_object->wired_page_count) + vm_compressor_pager_get_count(src_object->pager); + } -vm_map_t -convert_port_entry_to_map( - ipc_port_t port) -{ - vm_map_t map; - vm_named_entry_t named_entry; - uint32_t try_failed_count = 0; + if (evaluation_phase == TRUE) { + continue; + } + } - if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) { - while(TRUE) { - ip_lock(port); - if(ip_active(port) && (ip_kotype(port) - == IKOT_NAMED_ENTRY)) { - named_entry = - (vm_named_entry_t)port->ip_kobject; - if (!(lck_mtx_try_lock(&(named_entry)->Lock))) { - ip_unlock(port); + uint32_t paged_out_count = vm_object_compressed_freezer_pageout(src_object, dirty_budget); + *wired_count += src_object->wired_page_count; - try_failed_count++; - mutex_pause(try_failed_count); - continue; - } - named_entry->ref_count++; - lck_mtx_unlock(&(named_entry)->Lock); - ip_unlock(port); - if ((named_entry->is_sub_map) && - (named_entry->protection - & VM_PROT_WRITE)) { - map = named_entry->backing.map; - } else { - mach_destroy_memory_entry(port); - return VM_MAP_NULL; + if (vm_compressor_low_on_space() || vm_swap_low_on_space()) { + if (vm_compressor_low_on_space()) { + *freezer_error_code = FREEZER_ERROR_NO_COMPRESSOR_SPACE; + } + + if (vm_swap_low_on_space()) { + *freezer_error_code = FREEZER_ERROR_NO_SWAP_SPACE; + } + + kr = KERN_NO_SPACE; + break; } - vm_map_reference_swap(map); - mach_destroy_memory_entry(port); - break; + if (paged_out_count >= dirty_budget) { + break; + } + dirty_budget -= paged_out_count; } - else - return VM_MAP_NULL; } } - else - map = convert_port_to_map(port); - return map; -} + *shared_count = (unsigned int) ((dirty_shared_count * PAGE_SIZE_64) / (1024 * 1024ULL)); + if (evaluation_phase) { + unsigned int shared_pages_threshold = (memorystatus_freeze_shared_mb_per_process_max * 1024 * 1024ULL) / PAGE_SIZE_64; -/* - * Routine: convert_port_entry_to_object - * Purpose: - * Convert from a port specifying a named entry to an - * object. Doesn't consume the port ref; produces a map ref, - * which may be null. - * Conditions: - * Nothing locked. 
- */ + if (dirty_shared_count > shared_pages_threshold) { + *freezer_error_code = FREEZER_ERROR_EXCESS_SHARED_MEMORY; + kr = KERN_FAILURE; + goto done; + } + if (dirty_shared_count && + ((dirty_private_count / dirty_shared_count) < memorystatus_freeze_private_shared_pages_ratio)) { + *freezer_error_code = FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO; + kr = KERN_FAILURE; + goto done; + } -vm_object_t -convert_port_entry_to_object( - ipc_port_t port) -{ - vm_object_t object; - vm_named_entry_t named_entry; - uint32_t try_failed_count = 0; + evaluation_phase = FALSE; + dirty_shared_count = dirty_private_count = 0; - if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) { - while(TRUE) { - ip_lock(port); - if(ip_active(port) && (ip_kotype(port) - == IKOT_NAMED_ENTRY)) { - named_entry = - (vm_named_entry_t)port->ip_kobject; - if (!(lck_mtx_try_lock(&(named_entry)->Lock))) { - ip_unlock(port); + c_freezer_compression_count = 0; + clock_get_uptime(&c_freezer_last_yield_ts); - try_failed_count++; - mutex_pause(try_failed_count); - continue; - } - named_entry->ref_count++; - lck_mtx_unlock(&(named_entry)->Lock); - ip_unlock(port); - if ((!named_entry->is_sub_map) && - (!named_entry->is_pager) && - (named_entry->protection - & VM_PROT_WRITE)) { - object = named_entry->backing.object; - } else { - mach_destroy_memory_entry(port); - return (vm_object_t)NULL; - } - vm_object_reference(named_entry->backing.object); - mach_destroy_memory_entry(port); - break; - } - else - return (vm_object_t)NULL; + if (eval_only) { + kr = KERN_SUCCESS; + goto done; } + + vm_purgeable_purge_task_owned(task); + + goto again; } else { - return (vm_object_t)NULL; + kr = KERN_SUCCESS; } - return object; -} +done: + vm_map_unlock(map); -/* - * Export routines to other components for the things we access locally through - * macros. - */ -#undef current_map -vm_map_t -current_map(void) -{ - return (current_map_fast()); -} + if ((eval_only == FALSE) && (kr == KERN_SUCCESS)) { + vm_object_compressed_freezer_done(); -/* - * vm_map_reference: - * - * Most code internal to the osfmk will go through a - * macro defining this. This is always here for the - * use of other kernel components. - */ -#undef vm_map_reference -void -vm_map_reference( - register vm_map_t map) -{ - if (map == VM_MAP_NULL) - return; + if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { + /* + * reset the counter tracking the # of swapped compressed pages + * because we are now done with this freeze session and task. + */ - lck_mtx_lock(&map->s_lock); -#if TASK_SWAPPER - assert(map->res_count > 0); - assert(map->ref_count >= map->res_count); - map->res_count++; -#endif - map->ref_count++; - lck_mtx_unlock(&map->s_lock); + *dirty_count = c_freezer_swapout_page_count; //used to track pageouts + c_freezer_swapout_page_count = 0; + } + } + return kr; } +#endif + /* - * vm_map_deallocate: + * vm_map_entry_should_cow_for_true_share: * - * Removes a reference from the specified map, - * destroying it if no references remain. - * The map should not be locked. + * Determines if the map entry should be clipped and setup for copy-on-write + * to avoid applying "true_share" to a large VM object when only a subset is + * targeted. 
+ * + * For now, we target only the map entries created for the Objective C + * Garbage Collector, which initially have the following properties: + * - alias == VM_MEMORY_MALLOC + * - wired_count == 0 + * - !needs_copy + * and a VM object with: + * - internal + * - copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC + * - !true_share + * - vo_size == ANON_CHUNK_SIZE + * + * Only non-kernel map entries. */ -void -vm_map_deallocate( - register vm_map_t map) +boolean_t +vm_map_entry_should_cow_for_true_share( + vm_map_entry_t entry) { - unsigned int ref; + vm_object_t object; - if (map == VM_MAP_NULL) - return; + if (entry->is_sub_map) { + /* entry does not point at a VM object */ + return FALSE; + } - lck_mtx_lock(&map->s_lock); - ref = --map->ref_count; - if (ref > 0) { - vm_map_res_deallocate(map); - lck_mtx_unlock(&map->s_lock); - return; + if (entry->needs_copy) { + /* already set for copy_on_write: done! */ + return FALSE; + } + + if (VME_ALIAS(entry) != VM_MEMORY_MALLOC && + VME_ALIAS(entry) != VM_MEMORY_MALLOC_SMALL) { + /* not a malloc heap or Obj-C Garbage Collector heap */ + return FALSE; + } + + if (entry->wired_count) { + /* wired: can't change the map entry... */ + vm_counters.should_cow_but_wired++; + return FALSE; + } + + object = VME_OBJECT(entry); + + if (object == VM_OBJECT_NULL) { + /* no object yet... */ + return FALSE; + } + + if (!object->internal) { + /* not an internal object */ + return FALSE; + } + + if (object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC) { + /* not the default copy strategy */ + return FALSE; + } + + if (object->true_share) { + /* already true_share: too late to avoid it */ + return FALSE; + } + + if (VME_ALIAS(entry) == VM_MEMORY_MALLOC && + object->vo_size != ANON_CHUNK_SIZE) { + /* ... not an object created for the ObjC Garbage Collector */ + return FALSE; + } + + if (VME_ALIAS(entry) == VM_MEMORY_MALLOC_SMALL && + object->vo_size != 2048 * 4096) { + /* ... not a "MALLOC_SMALL" heap */ + return FALSE; } - assert(map->ref_count == 0); - lck_mtx_unlock(&map->s_lock); -#if TASK_SWAPPER /* - * The map residence count isn't decremented here because - * the vm_map_delete below will traverse the entire map, - * deleting entries, and the residence counts on objects - * and sharing maps will go away then. + * All the criteria match: we have a large object being targeted for "true_share". + * To limit the adverse side-effects linked with "true_share", tell the caller to + * try and avoid setting up the entire object for "true_share" by clipping the + * targeted range and setting it up for copy-on-write. */ -#endif - - vm_map_destroy(map, VM_MAP_NO_FLAGS); + return TRUE; } - -void -vm_map_disable_NX(vm_map_t map) +vm_map_offset_t +vm_map_round_page_mask( + vm_map_offset_t offset, + vm_map_offset_t mask) { - if (map == NULL) - return; - if (map->pmap == NULL) - return; + return VM_MAP_ROUND_PAGE(offset, mask); +} - pmap_disable_NX(map->pmap); +vm_map_offset_t +vm_map_trunc_page_mask( + vm_map_offset_t offset, + vm_map_offset_t mask) +{ + return VM_MAP_TRUNC_PAGE(offset, mask); } -void -vm_map_disallow_data_exec(vm_map_t map) +boolean_t +vm_map_page_aligned( + vm_map_offset_t offset, + vm_map_offset_t mask) { - if (map == NULL) - return; + return ((offset) & mask) == 0; +} - map->map_disallow_data_exec = TRUE; +int +vm_map_page_shift( + vm_map_t map) +{ + return VM_MAP_PAGE_SHIFT(map); } -/* XXX Consider making these constants (VM_MAX_ADDRESS and MACH_VM_MAX_ADDRESS) - * more descriptive. 
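 *
 * The vm_map_round_page_mask() / vm_map_trunc_page_mask() /
 * vm_map_page_aligned() wrappers defined above let callers honor a map's
 * native page size, which is not necessarily PAGE_SIZE.  A hedged sketch of
 * the usual pattern follows; the helper's name and its in/out parameter
 * shape are assumptions made for the example only.
 */

static void
vm_map_align_range_example(
	vm_map_t        map,
	vm_map_offset_t *start,   /* in/out: truncated to a map page boundary */
	vm_map_size_t   *size)    /* in/out: grown to cover whole map pages */
{
	vm_map_offset_t mask = vm_map_page_mask(map);
	vm_map_offset_t end;

	end    = vm_map_round_page_mask(*start + *size, mask);
	*start = vm_map_trunc_page_mask(*start, mask);
	*size  = end - *start;

	assert(vm_map_page_aligned(*start, mask));
	assert(vm_map_page_aligned(*size, mask));
}

/*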
- */ -void -vm_map_set_32bit(vm_map_t map) +int +vm_map_page_size( + vm_map_t map) { - map->max_offset = (vm_map_offset_t)VM_MAX_ADDRESS; + return VM_MAP_PAGE_SIZE(map); } +vm_map_offset_t +vm_map_page_mask( + vm_map_t map) +{ + return VM_MAP_PAGE_MASK(map); +} -void -vm_map_set_64bit(vm_map_t map) +kern_return_t +vm_map_set_page_shift( + vm_map_t map, + int pageshift) { - map->max_offset = (vm_map_offset_t)MACH_VM_MAX_ADDRESS; + if (map->hdr.nentries != 0) { + /* too late to change page size */ + return KERN_FAILURE; + } + + map->hdr.page_shift = pageshift; + + return KERN_SUCCESS; } -vm_map_offset_t -vm_compute_max_offset(unsigned is64) +kern_return_t +vm_map_query_volatile( + vm_map_t map, + mach_vm_size_t *volatile_virtual_size_p, + mach_vm_size_t *volatile_resident_size_p, + mach_vm_size_t *volatile_compressed_size_p, + mach_vm_size_t *volatile_pmap_size_p, + mach_vm_size_t *volatile_compressed_pmap_size_p) { - return (is64 ? (vm_map_offset_t)MACH_VM_MAX_ADDRESS : (vm_map_offset_t)VM_MAX_ADDRESS); + mach_vm_size_t volatile_virtual_size; + mach_vm_size_t volatile_resident_count; + mach_vm_size_t volatile_compressed_count; + mach_vm_size_t volatile_pmap_count; + mach_vm_size_t volatile_compressed_pmap_count; + mach_vm_size_t resident_count; + vm_map_entry_t entry; + vm_object_t object; + + /* map should be locked by caller */ + + volatile_virtual_size = 0; + volatile_resident_count = 0; + volatile_compressed_count = 0; + volatile_pmap_count = 0; + volatile_compressed_pmap_count = 0; + + for (entry = vm_map_first_entry(map); + entry != vm_map_to_entry(map); + entry = entry->vme_next) { + mach_vm_size_t pmap_resident_bytes, pmap_compressed_bytes; + + if (entry->is_sub_map) { + continue; + } + if (!(entry->protection & VM_PROT_WRITE)) { + continue; + } + object = VME_OBJECT(entry); + if (object == VM_OBJECT_NULL) { + continue; + } + if (object->purgable != VM_PURGABLE_VOLATILE && + object->purgable != VM_PURGABLE_EMPTY) { + continue; + } + if (VME_OFFSET(entry)) { + /* + * If the map entry has been split and the object now + * appears several times in the VM map, we don't want + * to count the object's resident_page_count more than + * once. We count it only for the first one, starting + * at offset 0 and ignore the other VM map entries. 
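 *
 * Illustrative case: a volatile object split across three entries at
 * offsets 0, 0x4000 and 0x8000 is counted only through the entry at
 * offset 0; the other two entries are skipped entirely, so neither the
 * object counters nor the pmap_query_resident() totals below include
 * their ranges.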
+ */ + continue; + } + resident_count = object->resident_page_count; + if ((VME_OFFSET(entry) / PAGE_SIZE) >= resident_count) { + resident_count = 0; + } else { + resident_count -= (VME_OFFSET(entry) / PAGE_SIZE); + } + + volatile_virtual_size += entry->vme_end - entry->vme_start; + volatile_resident_count += resident_count; + if (object->pager) { + volatile_compressed_count += + vm_compressor_pager_get_count(object->pager); + } + pmap_compressed_bytes = 0; + pmap_resident_bytes = + pmap_query_resident(map->pmap, + entry->vme_start, + entry->vme_end, + &pmap_compressed_bytes); + volatile_pmap_count += (pmap_resident_bytes / PAGE_SIZE); + volatile_compressed_pmap_count += (pmap_compressed_bytes + / PAGE_SIZE); + } + + /* map is still locked on return */ + + *volatile_virtual_size_p = volatile_virtual_size; + *volatile_resident_size_p = volatile_resident_count * PAGE_SIZE; + *volatile_compressed_size_p = volatile_compressed_count * PAGE_SIZE; + *volatile_pmap_size_p = volatile_pmap_count * PAGE_SIZE; + *volatile_compressed_pmap_size_p = volatile_compressed_pmap_count * PAGE_SIZE; + + return KERN_SUCCESS; } -boolean_t -vm_map_is_64bit( - vm_map_t map) +void +vm_map_sizes(vm_map_t map, + vm_map_size_t * psize, + vm_map_size_t * pfree, + vm_map_size_t * plargest_free) { - return map->max_offset > ((vm_map_offset_t)VM_MAX_ADDRESS); + vm_map_entry_t entry; + vm_map_offset_t prev; + vm_map_size_t free, total_free, largest_free; + boolean_t end; + + if (!map) { + *psize = *pfree = *plargest_free = 0; + return; + } + total_free = largest_free = 0; + + vm_map_lock_read(map); + if (psize) { + *psize = map->max_offset - map->min_offset; + } + + prev = map->min_offset; + for (entry = vm_map_first_entry(map);; entry = entry->vme_next) { + end = (entry == vm_map_to_entry(map)); + + if (end) { + free = entry->vme_end - prev; + } else { + free = entry->vme_start - prev; + } + + total_free += free; + if (free > largest_free) { + largest_free = free; + } + + if (end) { + break; + } + prev = entry->vme_end; + } + vm_map_unlock_read(map); + if (pfree) { + *pfree = total_free; + } + if (plargest_free) { + *plargest_free = largest_free; + } } -boolean_t -vm_map_has_4GB_pagezero( - vm_map_t map) +#if VM_SCAN_FOR_SHADOW_CHAIN +int vm_map_shadow_max(vm_map_t map); +int +vm_map_shadow_max( + vm_map_t map) { - /* - * XXX FBDP - * We should lock the VM map (for read) here but we can get away - * with it for now because there can't really be any race condition: - * the VM map's min_offset is changed only when the VM map is created - * and when the zero page is established (when the binary gets loaded), - * and this routine gets called only when the task terminates and the - * VM map is being torn down, and when a new map is created via - * load_machfile()/execve(). 
- */ - return (map->min_offset >= 0x100000000ULL); + int shadows, shadows_max; + vm_map_entry_t entry; + vm_object_t object, next_object; + + if (map == NULL) { + return 0; + } + + shadows_max = 0; + + vm_map_lock_read(map); + + for (entry = vm_map_first_entry(map); + entry != vm_map_to_entry(map); + entry = entry->vme_next) { + if (entry->is_sub_map) { + continue; + } + object = VME_OBJECT(entry); + if (object == NULL) { + continue; + } + vm_object_lock_shared(object); + for (shadows = 0; + object->shadow != NULL; + shadows++, object = next_object) { + next_object = object->shadow; + vm_object_lock_shared(next_object); + vm_object_unlock(object); + } + vm_object_unlock(object); + if (shadows > shadows_max) { + shadows_max = shadows; + } + } + + vm_map_unlock_read(map); + + return shadows_max; } +#endif /* VM_SCAN_FOR_SHADOW_CHAIN */ void -vm_map_set_4GB_pagezero(vm_map_t map) +vm_commit_pagezero_status(vm_map_t lmap) { -#if defined(__i386__) - pmap_set_4GB_pagezero(map->pmap); -#else -#pragma unused(map) -#endif - + pmap_advise_pagezero_range(lmap->pmap, lmap->min_offset); } +#if !CONFIG_EMBEDDED void -vm_map_clear_4GB_pagezero(vm_map_t map) +vm_map_set_high_start( + vm_map_t map, + vm_map_offset_t high_start) { -#if defined(__i386__) - pmap_clear_4GB_pagezero(map->pmap); -#else -#pragma unused(map) -#endif + map->vmmap_high_start = high_start; } +#endif -/* - * Raise a VM map's minimum offset. - * To strictly enforce "page zero" reservation. - */ +#if PMAP_CS kern_return_t -vm_map_raise_min_offset( - vm_map_t map, - vm_map_offset_t new_min_offset) +vm_map_entry_cs_associate( + vm_map_t map, + vm_map_entry_t entry, + vm_map_kernel_flags_t vmk_flags) { - vm_map_entry_t first_entry; + vm_object_t cs_object, cs_shadow; + vm_object_offset_t cs_offset; + void *cs_blobs; + struct vnode *cs_vnode; + kern_return_t cs_ret; + + if (map->pmap == NULL || + entry->is_sub_map || /* XXX FBDP: recurse on sub-range? */ + VME_OBJECT(entry) == VM_OBJECT_NULL || + !(entry->protection & VM_PROT_EXECUTE)) { + return KERN_SUCCESS; + } - new_min_offset = vm_map_round_page(new_min_offset); + vm_map_lock_assert_exclusive(map); - vm_map_lock(map); + if (entry->used_for_jit) { + cs_ret = pmap_cs_associate(map->pmap, + PMAP_CS_ASSOCIATE_JIT, + entry->vme_start, + entry->vme_end - entry->vme_start); + goto done; + } - if (new_min_offset < map->min_offset) { - /* - * Can't move min_offset backwards, as that would expose - * a part of the address space that was previously, and for - * possibly good reasons, inaccessible. - */ - vm_map_unlock(map); - return KERN_INVALID_ADDRESS; + if (vmk_flags.vmkf_remap_prot_copy) { + cs_ret = pmap_cs_associate(map->pmap, + PMAP_CS_ASSOCIATE_COW, + entry->vme_start, + entry->vme_end - entry->vme_start); + goto done; } - first_entry = vm_map_first_entry(map); - if (first_entry != vm_map_to_entry(map) && - first_entry->vme_start < new_min_offset) { - /* - * Some memory was already allocated below the new - * minimun offset. It's too late to change it now... 
- */ - vm_map_unlock(map); - return KERN_NO_SPACE; + vm_object_lock_shared(VME_OBJECT(entry)); + cs_offset = VME_OFFSET(entry); + for (cs_object = VME_OBJECT(entry); + (cs_object != VM_OBJECT_NULL && + !cs_object->code_signed); + cs_object = cs_shadow) { + cs_shadow = cs_object->shadow; + if (cs_shadow != VM_OBJECT_NULL) { + cs_offset += cs_object->vo_shadow_offset; + vm_object_lock_shared(cs_shadow); + } + vm_object_unlock(cs_object); + } + if (cs_object == VM_OBJECT_NULL) { + return KERN_SUCCESS; } - map->min_offset = new_min_offset; + cs_offset += cs_object->paging_offset; + cs_vnode = vnode_pager_lookup_vnode(cs_object->pager); + cs_ret = vnode_pager_get_cs_blobs(cs_vnode, + &cs_blobs); + assert(cs_ret == KERN_SUCCESS); + cs_ret = cs_associate_blob_with_mapping(map->pmap, + entry->vme_start, + (entry->vme_end - + entry->vme_start), + cs_offset, + cs_blobs); + vm_object_unlock(cs_object); + cs_object = VM_OBJECT_NULL; - vm_map_unlock(map); +done: + if (cs_ret == KERN_SUCCESS) { + DTRACE_VM2(vm_map_entry_cs_associate_success, + vm_map_offset_t, entry->vme_start, + vm_map_offset_t, entry->vme_end); + if (vm_map_executable_immutable) { + /* + * Prevent this executable + * mapping from being unmapped + * or modified. + */ + entry->permanent = TRUE; + } + /* + * pmap says it will validate the + * code-signing validity of pages + * faulted in via this mapping, so + * this map entry should be marked so + * that vm_fault() bypasses code-signing + * validation for faults coming through + * this mapping. + */ + entry->pmap_cs_associated = TRUE; + } else if (cs_ret == KERN_NOT_SUPPORTED) { + /* + * pmap won't check the code-signing + * validity of pages faulted in via + * this mapping, so VM should keep + * doing it. + */ + DTRACE_VM3(vm_map_entry_cs_associate_off, + vm_map_offset_t, entry->vme_start, + vm_map_offset_t, entry->vme_end, + int, cs_ret); + } else { + /* + * A real error: do not allow + * execution in this mapping. + */ + DTRACE_VM3(vm_map_entry_cs_associate_failure, + vm_map_offset_t, entry->vme_start, + vm_map_offset_t, entry->vme_end, + int, cs_ret); + entry->protection &= ~VM_PROT_EXECUTE; + entry->max_protection &= ~VM_PROT_EXECUTE; + } - return KERN_SUCCESS; + return cs_ret; } +#endif /* PMAP_CS */ /* - * Set the limit on the maximum amount of user wired memory allowed for this map. - * This is basically a copy of the MEMLOCK rlimit value maintained by the BSD side of - * the kernel. The limits are checked in the mach VM side, so we keep a copy so we - * don't have to reach over to the BSD data structures. + * FORKED CORPSE FOOTPRINT + * + * A forked corpse gets a copy of the original VM map but its pmap is mostly + * empty since it never ran and never got to fault in any pages. + * Collecting footprint info (via "sysctl vm.self_region_footprint") for + * a forked corpse would therefore return very little information. + * + * When forking a corpse, we can pass the VM_MAP_FORK_CORPSE_FOOTPRINT option + * to vm_map_fork() to collect footprint information from the original VM map + * and its pmap, and store it in the forked corpse's VM map. That information + * is stored in place of the VM map's "hole list" since we'll never need to + * lookup for holes in the corpse's map. 
+ * + * The corpse's footprint info looks like this: + * + * vm_map->vmmap_corpse_footprint points to pageable kernel memory laid out + * as follows: + * +---------------------------------------+ + * header-> | cf_size | + * +-------------------+-------------------+ + * | cf_last_region | cf_last_zeroes | + * +-------------------+-------------------+ + * region1-> | cfr_vaddr | + * +-------------------+-------------------+ + * | cfr_num_pages | d0 | d1 | d2 | d3 | + * +---------------------------------------+ + * | d4 | d5 | ... | + * +---------------------------------------+ + * | ... | + * +-------------------+-------------------+ + * | dy | dz | na | na | cfr_vaddr... | <-region2 + * +-------------------+-------------------+ + * | cfr_vaddr (ctd) | cfr_num_pages | + * +---------------------------------------+ + * | d0 | d1 ... | + * +---------------------------------------+ + * ... + * +---------------------------------------+ + * last region-> | cfr_vaddr | + * +---------------------------------------+ + * + cfr_num_pages | d0 | d1 | d2 | d3 | + * +---------------------------------------+ + * ... + * +---------------------------------------+ + * | dx | dy | dz | na | na | na | na | na | + * +---------------------------------------+ + * + * where: + * cf_size: total size of the buffer (rounded to page size) + * cf_last_region: offset in the buffer of the last "region" sub-header + * cf_last_zeroes: number of trailing "zero" dispositions at the end + * of last region + * cfr_vaddr: virtual address of the start of the covered "region" + * cfr_num_pages: number of pages in the covered "region" + * d*: disposition of the page at that virtual address + * Regions in the buffer are word-aligned. + * + * We estimate the size of the buffer based on the number of memory regions + * and the virtual size of the address space. While copying each memory region + * during vm_map_fork(), we also collect the footprint info for that region + * and store it in the buffer, packing it as much as possible (coalescing + * contiguous memory regions to avoid having too many region headers and + * avoiding long streaks of "zero" page dispositions by splitting footprint + * "regions", so the number of regions in the footprint buffer might not match + * the number of memory regions in the address space. + * + * We also have to copy the original task's "nonvolatile" ledgers since that's + * part of the footprint and will need to be reported to any tool asking for + * the footprint information of the forked corpse. */ -void -vm_map_set_user_wire_limit(vm_map_t map, - vm_size_t limit) +uint64_t vm_map_corpse_footprint_count = 0; +uint64_t vm_map_corpse_footprint_size_avg = 0; +uint64_t vm_map_corpse_footprint_size_max = 0; +uint64_t vm_map_corpse_footprint_full = 0; +uint64_t vm_map_corpse_footprint_no_buf = 0; + +/* + * vm_map_corpse_footprint_new_region: + * closes the current footprint "region" and creates a new one + * + * Returns NULL if there's not enough space in the buffer for a new region. 
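 *
 * As a hedged illustration of the layout described above, the sketch below
 * shows how a page's disposition byte would be located within a single
 * region; the function name and the out-of-range handling are assumptions
 * made for the example, not part of this change.
 */

static unsigned char
vm_map_corpse_footprint_region_disp_example(
	struct vm_map_corpse_footprint_region *region,
	vm_map_offset_t                       va)
{
	vm_map_offset_t page_idx;
	unsigned char   *dispositions;

	if (va < region->cfr_vaddr) {
		/* below this region: no disposition recorded */
		return 0;
	}
	page_idx = (va - region->cfr_vaddr) / PAGE_SIZE;
	if (page_idx >= region->cfr_num_pages) {
		/* past this region: no disposition recorded */
		return 0;
	}
	/* the disposition bytes immediately follow the region header */
	dispositions = (unsigned char *)region + sizeof(*region);
	return dispositions[page_idx];
}

/*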
+ */ +static struct vm_map_corpse_footprint_region * +vm_map_corpse_footprint_new_region( + struct vm_map_corpse_footprint_header *footprint_header) { - map->user_wire_limit = limit; -} + uintptr_t footprint_edge; + uint32_t new_region_offset; + struct vm_map_corpse_footprint_region *footprint_region; + struct vm_map_corpse_footprint_region *new_footprint_region; + + footprint_edge = ((uintptr_t)footprint_header + + footprint_header->cf_size); + footprint_region = ((struct vm_map_corpse_footprint_region *) + ((char *)footprint_header + + footprint_header->cf_last_region)); + assert((uintptr_t)footprint_region + sizeof(*footprint_region) <= + footprint_edge); + + /* get rid of trailing zeroes in the last region */ + assert(footprint_region->cfr_num_pages >= + footprint_header->cf_last_zeroes); + footprint_region->cfr_num_pages -= + footprint_header->cf_last_zeroes; + footprint_header->cf_last_zeroes = 0; + + /* reuse this region if it's now empty */ + if (footprint_region->cfr_num_pages == 0) { + return footprint_region; + } + + /* compute offset of new region */ + new_region_offset = footprint_header->cf_last_region; + new_region_offset += sizeof(*footprint_region); + new_region_offset += footprint_region->cfr_num_pages; + new_region_offset = roundup(new_region_offset, sizeof(int)); + + /* check if we're going over the edge */ + if (((uintptr_t)footprint_header + + new_region_offset + + sizeof(*footprint_region)) >= + footprint_edge) { + /* over the edge: no new region */ + return NULL; + } + /* adjust offset of last region in header */ + footprint_header->cf_last_region = new_region_offset; -void vm_map_switch_protect(vm_map_t map, - boolean_t val) -{ - vm_map_lock(map); - map->switch_protect=val; - vm_map_unlock(map); + new_footprint_region = (struct vm_map_corpse_footprint_region *) + ((char *)footprint_header + + footprint_header->cf_last_region); + new_footprint_region->cfr_vaddr = 0; + new_footprint_region->cfr_num_pages = 0; + /* caller needs to initialize new region */ + + return new_footprint_region; } -/* Add (generate) code signature for memory range */ -#if CONFIG_DYNAMIC_CODE_SIGNING -kern_return_t vm_map_sign(vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end) +/* + * vm_map_corpse_footprint_collect: + * collect footprint information for "old_entry" in "old_map" and + * stores it in "new_map"'s vmmap_footprint_info. + */ +kern_return_t +vm_map_corpse_footprint_collect( + vm_map_t old_map, + vm_map_entry_t old_entry, + vm_map_t new_map) { - vm_map_entry_t entry; - vm_page_t m; - vm_object_t object; - - /* - * Vet all the input parameters and current type and state of the - * underlaying object. Return with an error if anything is amiss. - */ - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); - - vm_map_lock_read(map); - - if (!vm_map_lookup_entry(map, start, &entry) || entry->is_sub_map) { + vm_map_offset_t va; + int disp; + kern_return_t kr; + struct vm_map_corpse_footprint_header *footprint_header; + struct vm_map_corpse_footprint_region *footprint_region; + struct vm_map_corpse_footprint_region *new_footprint_region; + unsigned char *next_disp_p; + uintptr_t footprint_edge; + uint32_t num_pages_tmp; + + va = old_entry->vme_start; + + vm_map_lock_assert_exclusive(old_map); + vm_map_lock_assert_exclusive(new_map); + + assert(new_map->has_corpse_footprint); + assert(!old_map->has_corpse_footprint); + if (!new_map->has_corpse_footprint || + old_map->has_corpse_footprint) { /* - * Must pass a valid non-submap address. 
+ * This can only transfer footprint info from a + * map with a live pmap to a map with a corpse footprint. */ - vm_map_unlock_read(map); - return(KERN_INVALID_ADDRESS); + return KERN_NOT_SUPPORTED; } - - if((entry->vme_start > start) || (entry->vme_end < end)) { + + if (new_map->vmmap_corpse_footprint == NULL) { + vm_offset_t buf; + vm_size_t buf_size; + + buf = 0; + buf_size = (sizeof(*footprint_header) + + (old_map->hdr.nentries + * + (sizeof(*footprint_region) + + +3)) /* potential alignment for each region */ + + + ((old_map->size / PAGE_SIZE) + * + sizeof(char))); /* disposition for each page */ +// printf("FBDP corpse map %p guestimate footprint size 0x%llx\n", new_map, (uint64_t) buf_size); + buf_size = round_page(buf_size); + + /* limit buffer to 1 page to validate overflow detection */ +// buf_size = PAGE_SIZE; + + /* limit size to a somewhat sane amount */ +#if CONFIG_EMBEDDED +#define VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE (256*1024) /* 256KB */ +#else /* CONFIG_EMBEDDED */ +#define VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE (8*1024*1024) /* 8MB */ +#endif /* CONFIG_EMBEDDED */ + if (buf_size > VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE) { + buf_size = VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE; + } + /* - * Map entry doesn't cover the requested range. Not handling - * this situation currently. + * Allocate the pageable buffer (with a trailing guard page). + * It will be zero-filled on demand. */ - vm_map_unlock_read(map); - return(KERN_INVALID_ARGUMENT); + kr = kernel_memory_allocate(kernel_map, + &buf, + (buf_size + + PAGE_SIZE), /* trailing guard page */ + 0, /* mask */ + KMA_PAGEABLE | KMA_GUARD_LAST, + VM_KERN_MEMORY_DIAG); + if (kr != KERN_SUCCESS) { + vm_map_corpse_footprint_no_buf++; + return kr; + } + + /* initialize header and 1st region */ + footprint_header = (struct vm_map_corpse_footprint_header *)buf; + new_map->vmmap_corpse_footprint = footprint_header; + + footprint_header->cf_size = buf_size; + footprint_header->cf_last_region = + sizeof(*footprint_header); + footprint_header->cf_last_zeroes = 0; + + footprint_region = (struct vm_map_corpse_footprint_region *) + ((char *)footprint_header + + footprint_header->cf_last_region); + footprint_region->cfr_vaddr = 0; + footprint_region->cfr_num_pages = 0; + } else { + /* retrieve header and last region */ + footprint_header = (struct vm_map_corpse_footprint_header *) + new_map->vmmap_corpse_footprint; + footprint_region = (struct vm_map_corpse_footprint_region *) + ((char *)footprint_header + + footprint_header->cf_last_region); } - - object = entry->object.vm_object; - if (object == VM_OBJECT_NULL) { + footprint_edge = ((uintptr_t)footprint_header + + footprint_header->cf_size); + + if ((footprint_region->cfr_vaddr + + (((vm_map_offset_t)footprint_region->cfr_num_pages) * + PAGE_SIZE)) + != old_entry->vme_start) { + uint64_t num_pages_delta; + uint32_t region_offset_delta; + /* - * Object must already be present or we can't sign. + * Not the next contiguous virtual address: + * start a new region or store "zero" dispositions for + * the missing pages? 
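 *
 * Cost intuition (illustrative numbers): storing a 3-page gap inline costs
 * 3 "zero" disposition bytes, which is cheaper than the region header plus
 * padding that a new region would take, so small gaps are filled with
 * zeroes.  A 1MB gap (256 disposition bytes at 4KB pages) is cheaper to
 * represent as a fresh region, which is what the region_offset_delta /
 * num_pages_delta comparison below decides.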
*/ - vm_map_unlock_read(map); - return KERN_INVALID_ARGUMENT; + /* size of gap in actual page dispositions */ + num_pages_delta = (((old_entry->vme_start - + footprint_region->cfr_vaddr) / PAGE_SIZE) + - footprint_region->cfr_num_pages); + /* size of gap as a new footprint region header */ + region_offset_delta = + (sizeof(*footprint_region) + + roundup((footprint_region->cfr_num_pages - + footprint_header->cf_last_zeroes), + sizeof(int)) - + (footprint_region->cfr_num_pages - + footprint_header->cf_last_zeroes)); +// printf("FBDP %s:%d region 0x%x 0x%llx 0x%x vme_start 0x%llx pages_delta 0x%llx region_delta 0x%x\n", __FUNCTION__, __LINE__, footprint_header->cf_last_region, footprint_region->cfr_vaddr, footprint_region->cfr_num_pages, old_entry->vme_start, num_pages_delta, region_offset_delta); + if (region_offset_delta < num_pages_delta || + os_add3_overflow(footprint_region->cfr_num_pages, + (uint32_t) num_pages_delta, + 1, + &num_pages_tmp)) { + /* + * Storing data for this gap would take more space + * than inserting a new footprint region header: + * let's start a new region and save space. If it's a + * tie, let's avoid using a new region, since that + * would require more region hops to find the right + * range during lookups. + * + * If the current region's cfr_num_pages would overflow + * if we added "zero" page dispositions for the gap, + * no choice but to start a new region. + */ +// printf("FBDP %s:%d new region\n", __FUNCTION__, __LINE__); + new_footprint_region = + vm_map_corpse_footprint_new_region(footprint_header); + /* check that we're not going over the edge */ + if (new_footprint_region == NULL) { + goto over_the_edge; + } + footprint_region = new_footprint_region; + /* initialize new region as empty */ + footprint_region->cfr_vaddr = old_entry->vme_start; + footprint_region->cfr_num_pages = 0; + } else { + /* + * Store "zero" page dispositions for the missing + * pages. + */ +// printf("FBDP %s:%d zero gap\n", __FUNCTION__, __LINE__); + for (; num_pages_delta > 0; num_pages_delta--) { + next_disp_p = + ((unsigned char *) footprint_region + + sizeof(*footprint_region) + + footprint_region->cfr_num_pages); + /* check that we're not going over the edge */ + if ((uintptr_t)next_disp_p >= footprint_edge) { + goto over_the_edge; + } + /* store "zero" disposition for this gap page */ + footprint_region->cfr_num_pages++; + *next_disp_p = (unsigned char) 0; + footprint_header->cf_last_zeroes++; + } + } } - - vm_object_lock(object); - vm_map_unlock_read(map); - - while(start < end) { - uint32_t refmod; - - m = vm_page_lookup(object, start - entry->vme_start + entry->offset ); - if (m==VM_PAGE_NULL) { - /* shoud we try to fault a page here? we can probably - * demand it exists and is locked for this request */ - vm_object_unlock(object); - return KERN_FAILURE; + + for (va = old_entry->vme_start; + va < old_entry->vme_end; + va += PAGE_SIZE) { + vm_object_t object; + + object = VME_OBJECT(old_entry); + if (!old_entry->is_sub_map && + old_entry->iokit_acct && + object != VM_OBJECT_NULL && + object->internal && + object->purgable == VM_PURGABLE_DENY) { + /* + * Non-purgeable IOKit memory: phys_footprint + * includes the entire virtual mapping. + * Since the forked corpse's VM map entry will not + * have "iokit_acct", pretend that this page's + * disposition is "present & internal", so that it + * shows up in the forked corpse's footprint. 
+ */ + disp = (PMAP_QUERY_PAGE_PRESENT | + PMAP_QUERY_PAGE_INTERNAL); + } else { + disp = 0; + pmap_query_page_info(old_map->pmap, + va, + &disp); } - /* deal with special page status */ - if (m->busy || - (m->unusual && (m->error || m->restart || m->private || m->absent))) { - vm_object_unlock(object); - return KERN_FAILURE; + +// if (va < SHARED_REGION_BASE_ARM64) printf("FBDP collect map %p va 0x%llx disp 0x%x\n", new_map, va, disp); + + if (disp == 0 && footprint_region->cfr_num_pages == 0) { + /* + * Ignore "zero" dispositions at start of + * region: just move start of region. + */ + footprint_region->cfr_vaddr += PAGE_SIZE; + continue; } - - /* Page is OK... now "validate" it */ - /* This is the place where we'll call out to create a code - * directory, later */ - m->cs_validated = TRUE; - /* The page is now "clean" for codesigning purposes. That means - * we don't consider it as modified (wpmapped) anymore. But - * we'll disconnect the page so we note any future modification - * attempts. */ - m->wpmapped = FALSE; - refmod = pmap_disconnect(m->phys_page); - - /* Pull the dirty status from the pmap, since we cleared the - * wpmapped bit */ - if ((refmod & VM_MEM_MODIFIED) && !m->dirty) { - m->dirty = TRUE; + /* would region's cfr_num_pages overflow? */ + if (os_add_overflow(footprint_region->cfr_num_pages, 1, + &num_pages_tmp)) { + /* overflow: create a new region */ + new_footprint_region = + vm_map_corpse_footprint_new_region( + footprint_header); + if (new_footprint_region == NULL) { + goto over_the_edge; + } + footprint_region = new_footprint_region; + footprint_region->cfr_vaddr = va; + footprint_region->cfr_num_pages = 0; } - - /* On to the next page */ - start += PAGE_SIZE; - } - vm_object_unlock(object); - - return KERN_SUCCESS; -} -#endif -#if CONFIG_FREEZE + next_disp_p = ((unsigned char *)footprint_region + + sizeof(*footprint_region) + + footprint_region->cfr_num_pages); + /* check that we're not going over the edge */ + if ((uintptr_t)next_disp_p >= footprint_edge) { + goto over_the_edge; + } + /* store this dispostion */ + *next_disp_p = (unsigned char) disp; + footprint_region->cfr_num_pages++; -kern_return_t vm_map_freeze_walk( - vm_map_t map, - unsigned int *purgeable_count, - unsigned int *wired_count, - unsigned int *clean_count, - unsigned int *dirty_count, - boolean_t *has_shared) -{ - vm_map_entry_t entry; - - vm_map_lock_read(map); - - *purgeable_count = *wired_count = *clean_count = *dirty_count = 0; - *has_shared = FALSE; - - for (entry = vm_map_first_entry(map); - entry != vm_map_to_entry(map); - entry = entry->vme_next) { - unsigned int purgeable, clean, dirty, wired; - boolean_t shared; - - if ((entry->object.vm_object == 0) || - (entry->is_sub_map) || - (entry->object.vm_object->phys_contiguous)) { + if (disp != 0) { + /* non-zero disp: break the current zero streak */ + footprint_header->cf_last_zeroes = 0; + /* done */ continue; } - vm_object_pack(&purgeable, &wired, &clean, &dirty, &shared, entry->object.vm_object, VM_OBJECT_NULL, NULL, NULL); - - *purgeable_count += purgeable; - *wired_count += wired; - *clean_count += clean; - *dirty_count += dirty; - - if (shared) { - *has_shared = TRUE; + /* zero disp: add to the current streak of zeroes */ + footprint_header->cf_last_zeroes++; + if ((footprint_header->cf_last_zeroes + + roundup((footprint_region->cfr_num_pages - + footprint_header->cf_last_zeroes) & + (sizeof(int) - 1), + sizeof(int))) < + (sizeof(*footprint_header))) { + /* + * There are not enough trailing "zero" dispositions + * (+ the extra 
padding we would need for the previous + * region); creating a new region would not save space + * at this point, so let's keep this "zero" disposition + * in this region and reconsider later. + */ + continue; } + /* + * Create a new region to avoid having too many consecutive + * "zero" dispositions. + */ + new_footprint_region = + vm_map_corpse_footprint_new_region(footprint_header); + if (new_footprint_region == NULL) { + goto over_the_edge; + } + footprint_region = new_footprint_region; + /* initialize the new region as empty ... */ + footprint_region->cfr_num_pages = 0; + /* ... and skip this "zero" disp */ + footprint_region->cfr_vaddr = va + PAGE_SIZE; } - vm_map_unlock_read(map); - return KERN_SUCCESS; -} -kern_return_t vm_map_freeze( - vm_map_t map, - unsigned int *purgeable_count, - unsigned int *wired_count, - unsigned int *clean_count, - unsigned int *dirty_count, - boolean_t *has_shared) -{ - vm_map_entry_t entry2 = VM_MAP_ENTRY_NULL; - vm_object_t compact_object = VM_OBJECT_NULL; - vm_object_offset_t offset = 0x0; - kern_return_t kr = KERN_SUCCESS; - void *default_freezer_toc = NULL; - boolean_t cleanup = FALSE; +over_the_edge: +// printf("FBDP map %p footprint was full for va 0x%llx\n", new_map, va); + vm_map_corpse_footprint_full++; + return KERN_RESOURCE_SHORTAGE; +} - *purgeable_count = *wired_count = *clean_count = *dirty_count = 0; - *has_shared = FALSE; +/* + * vm_map_corpse_footprint_collect_done: + * completes the footprint collection by getting rid of any remaining + * trailing "zero" dispositions and trimming the unused part of the + * kernel buffer + */ +void +vm_map_corpse_footprint_collect_done( + vm_map_t new_map) +{ + struct vm_map_corpse_footprint_header *footprint_header; + struct vm_map_corpse_footprint_region *footprint_region; + vm_size_t buf_size, actual_size; + kern_return_t kr; + + assert(new_map->has_corpse_footprint); + if (!new_map->has_corpse_footprint || + new_map->vmmap_corpse_footprint == NULL) { + return; + } - /* Create our compact object */ - compact_object = vm_object_allocate((vm_map_offset_t)(VM_MAX_ADDRESS) - (vm_map_offset_t)(VM_MIN_ADDRESS)); - if (!compact_object) { - kr = KERN_FAILURE; - goto done; + footprint_header = (struct vm_map_corpse_footprint_header *) + new_map->vmmap_corpse_footprint; + buf_size = footprint_header->cf_size; + + footprint_region = (struct vm_map_corpse_footprint_region *) + ((char *)footprint_header + + footprint_header->cf_last_region); + + /* get rid of trailing zeroes in last region */ + assert(footprint_region->cfr_num_pages >= footprint_header->cf_last_zeroes); + footprint_region->cfr_num_pages -= footprint_header->cf_last_zeroes; + footprint_header->cf_last_zeroes = 0; + + actual_size = (vm_size_t)(footprint_header->cf_last_region + + sizeof(*footprint_region) + + footprint_region->cfr_num_pages); + +// printf("FBDP map %p buf_size 0x%llx actual_size 0x%llx\n", new_map, (uint64_t) buf_size, (uint64_t) actual_size); + vm_map_corpse_footprint_size_avg = + (((vm_map_corpse_footprint_size_avg * + vm_map_corpse_footprint_count) + + actual_size) / + (vm_map_corpse_footprint_count + 1)); + vm_map_corpse_footprint_count++; + if (actual_size > vm_map_corpse_footprint_size_max) { + vm_map_corpse_footprint_size_max = actual_size; } - - default_freezer_toc = default_freezer_mapping_create(compact_object, offset); - if (!default_freezer_toc) { - kr = KERN_FAILURE; - goto done; + + actual_size = round_page(actual_size); + if (buf_size > actual_size) { + kr = vm_deallocate(kernel_map, + ((vm_address_t)footprint_header 
+ + actual_size + + PAGE_SIZE), /* trailing guard page */ + (buf_size - actual_size)); + assertf(kr == KERN_SUCCESS, + "trim: footprint_header %p buf_size 0x%llx actual_size 0x%llx kr=0x%x\n", + footprint_header, + (uint64_t) buf_size, + (uint64_t) actual_size, + kr); + kr = vm_protect(kernel_map, + ((vm_address_t)footprint_header + + actual_size), + PAGE_SIZE, + FALSE, /* set_maximum */ + VM_PROT_NONE); + assertf(kr == KERN_SUCCESS, + "guard: footprint_header %p buf_size 0x%llx actual_size 0x%llx kr=0x%x\n", + footprint_header, + (uint64_t) buf_size, + (uint64_t) actual_size, + kr); } - /* - * We need the exclusive lock here so that we can - * block any page faults or lookups while we are - * in the middle of freezing this vm map. - */ - vm_map_lock(map); + footprint_header->cf_size = actual_size; +} - if (map->default_freezer_toc != NULL){ - /* - * This map has already been frozen. - */ - cleanup = TRUE; - kr = KERN_SUCCESS; +/* + * vm_map_corpse_footprint_query_page_info: + * retrieves the disposition of the page at virtual address "vaddr" + * in the forked corpse's VM map + * + * This is the equivalent of pmap_query_page_info() for a forked corpse. + */ +kern_return_t +vm_map_corpse_footprint_query_page_info( + vm_map_t map, + vm_map_offset_t va, + int *disp) +{ + struct vm_map_corpse_footprint_header *footprint_header; + struct vm_map_corpse_footprint_region *footprint_region; + uint32_t footprint_region_offset; + vm_map_offset_t region_start, region_end; + int disp_idx; + kern_return_t kr; + + if (!map->has_corpse_footprint) { + *disp = 0; + kr = KERN_INVALID_ARGUMENT; goto done; } - /* Get a mapping in place for the freezing about to commence */ - map->default_freezer_toc = default_freezer_toc; - - vm_object_lock(compact_object); + footprint_header = map->vmmap_corpse_footprint; + if (footprint_header == NULL) { + *disp = 0; +// if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disp); + kr = KERN_INVALID_ARGUMENT; + goto done; + } - for (entry2 = vm_map_first_entry(map); - entry2 != vm_map_to_entry(map); - entry2 = entry2->vme_next) { - - vm_object_t src_object = entry2->object.vm_object; + /* start looking at the hint ("cf_hint_region") */ + footprint_region_offset = footprint_header->cf_hint_region; - /* If eligible, scan the entry, moving eligible pages over to our parent object */ - if (entry2->object.vm_object && !entry2->is_sub_map && !entry2->object.vm_object->phys_contiguous) { - unsigned int purgeable, clean, dirty, wired; - boolean_t shared; - - vm_object_pack(&purgeable, &wired, &clean, &dirty, &shared, - src_object, compact_object, &default_freezer_toc, &offset); - - *purgeable_count += purgeable; - *wired_count += wired; - *clean_count += clean; - *dirty_count += dirty; +lookup_again: + if (footprint_region_offset < sizeof(*footprint_header)) { + /* hint too low: start from 1st region */ + footprint_region_offset = sizeof(*footprint_header); + } + if (footprint_region_offset >= footprint_header->cf_last_region) { + /* hint too high: re-start from 1st region */ + footprint_region_offset = sizeof(*footprint_header); + } + footprint_region = (struct vm_map_corpse_footprint_region *) + ((char *)footprint_header + footprint_region_offset); + region_start = footprint_region->cfr_vaddr; + region_end = (region_start + + ((vm_map_offset_t)(footprint_region->cfr_num_pages) * + PAGE_SIZE)); + if (va < region_start && + footprint_region_offset != sizeof(*footprint_header)) { + /* our range starts before the hint region */ + 
+ /* reset the hint (in a racy way...) */ + footprint_header->cf_hint_region = sizeof(*footprint_header); + /* lookup "va" again from 1st region */ + footprint_region_offset = sizeof(*footprint_header); + goto lookup_again; + } - if (shared) { - *has_shared = TRUE; - } + while (va >= region_end) { + if (footprint_region_offset >= footprint_header->cf_last_region) { + break; } + /* skip the region's header */ + footprint_region_offset += sizeof(*footprint_region); + /* skip the region's page dispositions */ + footprint_region_offset += footprint_region->cfr_num_pages; + /* align to next word boundary */ + footprint_region_offset = + roundup(footprint_region_offset, + sizeof(int)); + footprint_region = (struct vm_map_corpse_footprint_region *) + ((char *)footprint_header + footprint_region_offset); + region_start = footprint_region->cfr_vaddr; + region_end = (region_start + + ((vm_map_offset_t)(footprint_region->cfr_num_pages) * + PAGE_SIZE)); + } + if (va < region_start || va >= region_end) { + /* page not found */ + *disp = 0; +// if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disp); + kr = KERN_SUCCESS; + goto done; } - vm_object_unlock(compact_object); - - /* Finally, throw out the pages to swap */ - vm_object_pageout(compact_object); + /* "va" found: set the lookup hint for next lookup (in a racy way...) */ + footprint_header->cf_hint_region = footprint_region_offset; + /* get page disposition for "va" in this region */ + disp_idx = (int) ((va - footprint_region->cfr_vaddr) / PAGE_SIZE); + *disp = (int) (footprint_region->cfr_disposition[disp_idx]); + + kr = KERN_SUCCESS; done: - vm_map_unlock(map); +// if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disp); + /* dtrace -n 'vminfo:::footprint_query_page_info { printf("map 0x%p va 0x%llx disp 0x%x kr 0x%x", arg0, arg1, arg2, arg3); }' */ + DTRACE_VM4(footprint_query_page_info, + vm_map_t, map, + vm_map_offset_t, va, + int, *disp, + kern_return_t, kr); - /* Unwind if there was a failure */ - if ((cleanup) || (KERN_SUCCESS != kr)) { - if (default_freezer_toc){ - default_freezer_mapping_free(&map->default_freezer_toc, TRUE); - } - if (compact_object){ - vm_object_deallocate(compact_object); - } - } - return kr; } -__private_extern__ vm_object_t default_freezer_get_compact_vm_object( void** ); +static void +vm_map_corpse_footprint_destroy( + vm_map_t map) +{ + if (map->has_corpse_footprint && + map->vmmap_corpse_footprint != 0) { + struct vm_map_corpse_footprint_header *footprint_header; + vm_size_t buf_size; + kern_return_t kr; + + footprint_header = map->vmmap_corpse_footprint; + buf_size = footprint_header->cf_size; + kr = vm_deallocate(kernel_map, + (vm_offset_t) map->vmmap_corpse_footprint, + ((vm_size_t) buf_size + + PAGE_SIZE)); /* trailing guard page */ + assertf(kr == KERN_SUCCESS, "kr=0x%x\n", kr); + map->vmmap_corpse_footprint = 0; + map->has_corpse_footprint = FALSE; + } +} + +/* + * vm_map_copy_footprint_ledgers: + * copies any ledger that's relevant to the memory footprint of "old_task" + * into the forked corpse's task ("new_task") + */ +void +vm_map_copy_footprint_ledgers( + task_t old_task, + task_t new_task) +{ + vm_map_copy_ledger(old_task, new_task, task_ledgers.phys_footprint); + vm_map_copy_ledger(old_task, new_task, task_ledgers.purgeable_nonvolatile); + vm_map_copy_ledger(old_task, new_task, task_ledgers.purgeable_nonvolatile_compressed); + vm_map_copy_ledger(old_task, new_task, 
task_ledgers.internal); + vm_map_copy_ledger(old_task, new_task, task_ledgers.internal_compressed); + vm_map_copy_ledger(old_task, new_task, task_ledgers.iokit_mapped); + vm_map_copy_ledger(old_task, new_task, task_ledgers.alternate_accounting); + vm_map_copy_ledger(old_task, new_task, task_ledgers.alternate_accounting_compressed); + vm_map_copy_ledger(old_task, new_task, task_ledgers.page_table); + vm_map_copy_ledger(old_task, new_task, task_ledgers.tagged_footprint); + vm_map_copy_ledger(old_task, new_task, task_ledgers.tagged_footprint_compressed); + vm_map_copy_ledger(old_task, new_task, task_ledgers.network_nonvolatile); + vm_map_copy_ledger(old_task, new_task, task_ledgers.network_nonvolatile_compressed); + vm_map_copy_ledger(old_task, new_task, task_ledgers.media_footprint); + vm_map_copy_ledger(old_task, new_task, task_ledgers.media_footprint_compressed); + vm_map_copy_ledger(old_task, new_task, task_ledgers.graphics_footprint); + vm_map_copy_ledger(old_task, new_task, task_ledgers.graphics_footprint_compressed); + vm_map_copy_ledger(old_task, new_task, task_ledgers.neural_footprint); + vm_map_copy_ledger(old_task, new_task, task_ledgers.neural_footprint_compressed); + vm_map_copy_ledger(old_task, new_task, task_ledgers.wired_mem); +} + +/* + * vm_map_copy_ledger: + * copy a single ledger from "old_task" to "new_task" + */ void -vm_map_thaw( - vm_map_t map) +vm_map_copy_ledger( + task_t old_task, + task_t new_task, + int ledger_entry) { - void **default_freezer_toc; - vm_object_t compact_object; + ledger_amount_t old_balance, new_balance, delta; - vm_map_lock(map); + assert(new_task->map->has_corpse_footprint); + if (!new_task->map->has_corpse_footprint) { + return; + } - if (map->default_freezer_toc == NULL){ - /* - * This map is not in a frozen state. 
- */ - goto out; + /* turn off sanity checks for the ledger we're about to mess with */ + ledger_disable_panic_on_negative(new_task->ledger, + ledger_entry); + + /* adjust "new_task" to match "old_task" */ + ledger_get_balance(old_task->ledger, + ledger_entry, + &old_balance); + ledger_get_balance(new_task->ledger, + ledger_entry, + &new_balance); + if (new_balance == old_balance) { + /* new == old: done */ + } else if (new_balance > old_balance) { + /* new > old ==> new -= new - old */ + delta = new_balance - old_balance; + ledger_debit(new_task->ledger, + ledger_entry, + delta); + } else { + /* new < old ==> new += old - new */ + delta = old_balance - new_balance; + ledger_credit(new_task->ledger, + ledger_entry, + delta); } - - default_freezer_toc = &(map->default_freezer_toc); - - compact_object = default_freezer_get_compact_vm_object(default_freezer_toc); - - /* Bring the pages back in */ - vm_object_pagein(compact_object); - - /* Shift pages back to their original objects */ - vm_object_unpack(compact_object, default_freezer_toc); +} - vm_object_deallocate(compact_object); +#if MACH_ASSERT - map->default_freezer_toc = NULL; - -out: - vm_map_unlock(map); +extern int pmap_ledgers_panic; +extern int pmap_ledgers_panic_leeway; + +#define LEDGER_DRIFT(__LEDGER) \ + int __LEDGER##_over; \ + ledger_amount_t __LEDGER##_over_total; \ + ledger_amount_t __LEDGER##_over_max; \ + int __LEDGER##_under; \ + ledger_amount_t __LEDGER##_under_total; \ + ledger_amount_t __LEDGER##_under_max + +struct { + uint64_t num_pmaps_checked; + + LEDGER_DRIFT(phys_footprint); + LEDGER_DRIFT(internal); + LEDGER_DRIFT(internal_compressed); + LEDGER_DRIFT(iokit_mapped); + LEDGER_DRIFT(alternate_accounting); + LEDGER_DRIFT(alternate_accounting_compressed); + LEDGER_DRIFT(page_table); + LEDGER_DRIFT(purgeable_volatile); + LEDGER_DRIFT(purgeable_nonvolatile); + LEDGER_DRIFT(purgeable_volatile_compressed); + LEDGER_DRIFT(purgeable_nonvolatile_compressed); + LEDGER_DRIFT(tagged_nofootprint); + LEDGER_DRIFT(tagged_footprint); + LEDGER_DRIFT(tagged_nofootprint_compressed); + LEDGER_DRIFT(tagged_footprint_compressed); + LEDGER_DRIFT(network_volatile); + LEDGER_DRIFT(network_nonvolatile); + LEDGER_DRIFT(network_volatile_compressed); + LEDGER_DRIFT(network_nonvolatile_compressed); + LEDGER_DRIFT(media_nofootprint); + LEDGER_DRIFT(media_footprint); + LEDGER_DRIFT(media_nofootprint_compressed); + LEDGER_DRIFT(media_footprint_compressed); + LEDGER_DRIFT(graphics_nofootprint); + LEDGER_DRIFT(graphics_footprint); + LEDGER_DRIFT(graphics_nofootprint_compressed); + LEDGER_DRIFT(graphics_footprint_compressed); + LEDGER_DRIFT(neural_nofootprint); + LEDGER_DRIFT(neural_footprint); + LEDGER_DRIFT(neural_nofootprint_compressed); + LEDGER_DRIFT(neural_footprint_compressed); +} pmap_ledgers_drift; + +void +vm_map_pmap_check_ledgers( + pmap_t pmap, + ledger_t ledger, + int pid, + char *procname) +{ + ledger_amount_t bal; + boolean_t do_panic; + + do_panic = FALSE; + + pmap_ledgers_drift.num_pmaps_checked++; + +#define LEDGER_CHECK_BALANCE(__LEDGER) \ +MACRO_BEGIN \ + int panic_on_negative = TRUE; \ + ledger_get_balance(ledger, \ + task_ledgers.__LEDGER, \ + &bal); \ + ledger_get_panic_on_negative(ledger, \ + task_ledgers.__LEDGER, \ + &panic_on_negative); \ + if (bal != 0) { \ + if (panic_on_negative || \ + (pmap_ledgers_panic && \ + pmap_ledgers_panic_leeway > 0 && \ + (bal > (pmap_ledgers_panic_leeway * PAGE_SIZE) || \ + bal < (-pmap_ledgers_panic_leeway * PAGE_SIZE)))) { \ + do_panic = TRUE; \ + } \ + printf("LEDGER BALANCE proc %d (%s) 
" \ + "\"%s\" = %lld\n", \ + pid, procname, #__LEDGER, bal); \ + if (bal > 0) { \ + pmap_ledgers_drift.__LEDGER##_over++; \ + pmap_ledgers_drift.__LEDGER##_over_total += bal; \ + if (bal > pmap_ledgers_drift.__LEDGER##_over_max) { \ + pmap_ledgers_drift.__LEDGER##_over_max = bal; \ + } \ + } else if (bal < 0) { \ + pmap_ledgers_drift.__LEDGER##_under++; \ + pmap_ledgers_drift.__LEDGER##_under_total += bal; \ + if (bal < pmap_ledgers_drift.__LEDGER##_under_max) { \ + pmap_ledgers_drift.__LEDGER##_under_max = bal; \ + } \ + } \ + } \ +MACRO_END + + LEDGER_CHECK_BALANCE(phys_footprint); + LEDGER_CHECK_BALANCE(internal); + LEDGER_CHECK_BALANCE(internal_compressed); + LEDGER_CHECK_BALANCE(iokit_mapped); + LEDGER_CHECK_BALANCE(alternate_accounting); + LEDGER_CHECK_BALANCE(alternate_accounting_compressed); + LEDGER_CHECK_BALANCE(page_table); + LEDGER_CHECK_BALANCE(purgeable_volatile); + LEDGER_CHECK_BALANCE(purgeable_nonvolatile); + LEDGER_CHECK_BALANCE(purgeable_volatile_compressed); + LEDGER_CHECK_BALANCE(purgeable_nonvolatile_compressed); + LEDGER_CHECK_BALANCE(tagged_nofootprint); + LEDGER_CHECK_BALANCE(tagged_footprint); + LEDGER_CHECK_BALANCE(tagged_nofootprint_compressed); + LEDGER_CHECK_BALANCE(tagged_footprint_compressed); + LEDGER_CHECK_BALANCE(network_volatile); + LEDGER_CHECK_BALANCE(network_nonvolatile); + LEDGER_CHECK_BALANCE(network_volatile_compressed); + LEDGER_CHECK_BALANCE(network_nonvolatile_compressed); + LEDGER_CHECK_BALANCE(media_nofootprint); + LEDGER_CHECK_BALANCE(media_footprint); + LEDGER_CHECK_BALANCE(media_nofootprint_compressed); + LEDGER_CHECK_BALANCE(media_footprint_compressed); + LEDGER_CHECK_BALANCE(graphics_nofootprint); + LEDGER_CHECK_BALANCE(graphics_footprint); + LEDGER_CHECK_BALANCE(graphics_nofootprint_compressed); + LEDGER_CHECK_BALANCE(graphics_footprint_compressed); + LEDGER_CHECK_BALANCE(neural_nofootprint); + LEDGER_CHECK_BALANCE(neural_footprint); + LEDGER_CHECK_BALANCE(neural_nofootprint_compressed); + LEDGER_CHECK_BALANCE(neural_footprint_compressed); + + if (do_panic) { + if (pmap_ledgers_panic) { + panic("pmap_destroy(%p) %d[%s] has imbalanced ledgers\n", + pmap, pid, procname); + } else { + printf("pmap_destroy(%p) %d[%s] has imbalanced ledgers\n", + pmap, pid, procname); + } + } } -#endif +#endif /* MACH_ASSERT */