X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/060df5ea7c632b1ac8cc8aac1fb59758165c2084..bca245acd4c03fd752d1a45f011ad495e60fe53d:/osfmk/vm/vm_user.c?ds=inline diff --git a/osfmk/vm/vm_user.c b/osfmk/vm/vm_user.c index 582e51fc0..027d0c992 100644 --- a/osfmk/vm/vm_user.c +++ b/osfmk/vm/vm_user.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -58,7 +58,7 @@ /* * File: vm/vm_user.c * Author: Avadis Tevanian, Jr., Michael Wayne Young - * + * * User-exported virtual memory functions. */ @@ -90,17 +90,19 @@ #include #include #include -#include /* to get vm_address_t */ +#include /* to get vm_address_t */ #include -#include /* to get pointer_t */ +#include /* to get pointer_t */ #include #include #include #include #include +#include #include #include +#include #include #include @@ -114,40 +116,61 @@ #include #include #include +#include +#include + +#include + +#include +#include vm_size_t upl_offset_to_pagelist = 0; -#if VM_CPM +#if VM_CPM #include -#endif /* VM_CPM */ - -ipc_port_t dynamic_pager_control_port=NULL; +#endif /* VM_CPM */ /* * mach_vm_allocate allocates "zero fill" memory in the specfied * map. 
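 *
 * A hedged user-space sketch of the call described above (hypothetical
 * caller, not part of this change; assumes <mach/mach.h> and
 * <mach/mach_vm.h>).  User code keeps the old name; with this change,
 * in-kernel callers go through mach_vm_allocate_kernel() and pass an
 * explicit vm_tag_t:
 *
 *	mach_vm_address_t addr = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_allocate(mach_task_self(), &addr,
 *	    (mach_vm_size_t)vm_page_size, VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS) {
 *		// one zero-filled page is now mapped at addr
 *		kr = mach_vm_deallocate(mach_task_self(), addr,
 *		    (mach_vm_size_t)vm_page_size);
 *	}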
*/ kern_return_t -mach_vm_allocate( - vm_map_t map, - mach_vm_offset_t *addr, - mach_vm_size_t size, - int flags) +mach_vm_allocate_external( + vm_map_t map, + mach_vm_offset_t *addr, + mach_vm_size_t size, + int flags) +{ + vm_tag_t tag; + + VM_GET_FLAGS_ALIAS(flags, tag); + return mach_vm_allocate_kernel(map, addr, size, flags, tag); +} + +kern_return_t +mach_vm_allocate_kernel( + vm_map_t map, + mach_vm_offset_t *addr, + mach_vm_size_t size, + int flags, + vm_tag_t tag) { vm_map_offset_t map_addr; - vm_map_size_t map_size; - kern_return_t result; - boolean_t anywhere; + vm_map_size_t map_size; + kern_return_t result; + boolean_t anywhere; /* filter out any kernel-only flags */ - if (flags & ~VM_FLAGS_USER_ALLOCATE) + if (flags & ~VM_FLAGS_USER_ALLOCATE) { return KERN_INVALID_ARGUMENT; + } - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } if (size == 0) { *addr = 0; - return(KERN_SUCCESS); + return KERN_SUCCESS; } anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0); @@ -163,58 +186,80 @@ mach_vm_allocate( * memory would tend to confuse those applications. */ map_addr = vm_map_min(map); - if (map_addr == 0) - map_addr += PAGE_SIZE; - } else - map_addr = vm_map_trunc_page(*addr); - map_size = vm_map_round_page(size); + if (map_addr == 0) { + map_addr += VM_MAP_PAGE_SIZE(map); + } + } else { + map_addr = vm_map_trunc_page(*addr, + VM_MAP_PAGE_MASK(map)); + } + map_size = vm_map_round_page(size, + VM_MAP_PAGE_MASK(map)); if (map_size == 0) { - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } result = vm_map_enter( - map, - &map_addr, - map_size, - (vm_map_offset_t)0, - flags, - VM_OBJECT_NULL, - (vm_object_offset_t)0, - FALSE, - VM_PROT_DEFAULT, - VM_PROT_ALL, - VM_INHERIT_DEFAULT); + map, + &map_addr, + map_size, + (vm_map_offset_t)0, + flags, + VM_MAP_KERNEL_FLAGS_NONE, + tag, + VM_OBJECT_NULL, + (vm_object_offset_t)0, + FALSE, + VM_PROT_DEFAULT, + VM_PROT_ALL, + VM_INHERIT_DEFAULT); *addr = map_addr; - return(result); + return result; } /* - * vm_allocate + * vm_allocate * Legacy routine that allocates "zero fill" memory in the specfied * map (which is limited to the same size as the kernel). */ kern_return_t -vm_allocate( - vm_map_t map, - vm_offset_t *addr, - vm_size_t size, - int flags) +vm_allocate_external( + vm_map_t map, + vm_offset_t *addr, + vm_size_t size, + int flags) +{ + vm_tag_t tag; + + VM_GET_FLAGS_ALIAS(flags, tag); + return vm_allocate_kernel(map, addr, size, flags, tag); +} + +kern_return_t +vm_allocate_kernel( + vm_map_t map, + vm_offset_t *addr, + vm_size_t size, + int flags, + vm_tag_t tag) { vm_map_offset_t map_addr; - vm_map_size_t map_size; - kern_return_t result; - boolean_t anywhere; + vm_map_size_t map_size; + kern_return_t result; + boolean_t anywhere; /* filter out any kernel-only flags */ - if (flags & ~VM_FLAGS_USER_ALLOCATE) + if (flags & ~VM_FLAGS_USER_ALLOCATE) { return KERN_INVALID_ARGUMENT; + } - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } if (size == 0) { *addr = 0; - return(KERN_SUCCESS); + return KERN_SUCCESS; } anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0); @@ -230,30 +275,42 @@ vm_allocate( * memory would tend to confuse those applications. 
*/ map_addr = vm_map_min(map); - if (map_addr == 0) - map_addr += PAGE_SIZE; - } else - map_addr = vm_map_trunc_page(*addr); - map_size = vm_map_round_page(size); + if (map_addr == 0) { + map_addr += VM_MAP_PAGE_SIZE(map); + } + } else { + map_addr = vm_map_trunc_page(*addr, + VM_MAP_PAGE_MASK(map)); + } + map_size = vm_map_round_page(size, + VM_MAP_PAGE_MASK(map)); if (map_size == 0) { - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } result = vm_map_enter( - map, - &map_addr, - map_size, - (vm_map_offset_t)0, - flags, - VM_OBJECT_NULL, - (vm_object_offset_t)0, - FALSE, - VM_PROT_DEFAULT, - VM_PROT_ALL, - VM_INHERIT_DEFAULT); + map, + &map_addr, + map_size, + (vm_map_offset_t)0, + flags, + VM_MAP_KERNEL_FLAGS_NONE, + tag, + VM_OBJECT_NULL, + (vm_object_offset_t)0, + FALSE, + VM_PROT_DEFAULT, + VM_PROT_ALL, + VM_INHERIT_DEFAULT); + +#if KASAN + if (result == KERN_SUCCESS && map->pmap == kernel_pmap) { + kasan_notify_address(map_addr, map_size); + } +#endif *addr = CAST_DOWN(vm_offset_t, map_addr); - return(result); + return result; } /* @@ -263,18 +320,24 @@ vm_allocate( */ kern_return_t mach_vm_deallocate( - vm_map_t map, - mach_vm_offset_t start, - mach_vm_size_t size) + vm_map_t map, + mach_vm_offset_t start, + mach_vm_size_t size) { - if ((map == VM_MAP_NULL) || (start + size < start)) - return(KERN_INVALID_ARGUMENT); + if ((map == VM_MAP_NULL) || (start + size < start)) { + return KERN_INVALID_ARGUMENT; + } - if (size == (mach_vm_offset_t) 0) - return(KERN_SUCCESS); + if (size == (mach_vm_offset_t) 0) { + return KERN_SUCCESS; + } - return(vm_map_remove(map, vm_map_trunc_page(start), - vm_map_round_page(start+size), VM_MAP_NO_FLAGS)); + return vm_map_remove(map, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + VM_MAP_REMOVE_NO_FLAGS); } /* @@ -285,18 +348,24 @@ mach_vm_deallocate( */ kern_return_t vm_deallocate( - register vm_map_t map, - vm_offset_t start, - vm_size_t size) + vm_map_t map, + vm_offset_t start, + vm_size_t size) { - if ((map == VM_MAP_NULL) || (start + size < start)) - return(KERN_INVALID_ARGUMENT); + if ((map == VM_MAP_NULL) || (start + size < start)) { + return KERN_INVALID_ARGUMENT; + } - if (size == (vm_offset_t) 0) - return(KERN_SUCCESS); + if (size == (vm_offset_t) 0) { + return KERN_SUCCESS; + } - return(vm_map_remove(map, vm_map_trunc_page(start), - vm_map_round_page(start+size), VM_MAP_NO_FLAGS)); + return vm_map_remove(map, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + VM_MAP_REMOVE_NO_FLAGS); } /* @@ -306,22 +375,26 @@ vm_deallocate( */ kern_return_t mach_vm_inherit( - vm_map_t map, - mach_vm_offset_t start, - mach_vm_size_t size, - vm_inherit_t new_inheritance) + vm_map_t map, + mach_vm_offset_t start, + mach_vm_size_t size, + vm_inherit_t new_inheritance) { if ((map == VM_MAP_NULL) || (start + size < start) || - (new_inheritance > VM_INHERIT_LAST_VALID)) - return(KERN_INVALID_ARGUMENT); + (new_inheritance > VM_INHERIT_LAST_VALID)) { + return KERN_INVALID_ARGUMENT; + } - if (size == 0) + if (size == 0) { return KERN_SUCCESS; + } - return(vm_map_inherit(map, - vm_map_trunc_page(start), - vm_map_round_page(start+size), - new_inheritance)); + return vm_map_inherit(map, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + new_inheritance); } /* @@ -331,22 +404,26 @@ mach_vm_inherit( */ kern_return_t vm_inherit( - register vm_map_t map, - vm_offset_t 
start, - vm_size_t size, - vm_inherit_t new_inheritance) + vm_map_t map, + vm_offset_t start, + vm_size_t size, + vm_inherit_t new_inheritance) { if ((map == VM_MAP_NULL) || (start + size < start) || - (new_inheritance > VM_INHERIT_LAST_VALID)) - return(KERN_INVALID_ARGUMENT); + (new_inheritance > VM_INHERIT_LAST_VALID)) { + return KERN_INVALID_ARGUMENT; + } - if (size == 0) + if (size == 0) { return KERN_SUCCESS; + } - return(vm_map_inherit(map, - vm_map_trunc_page(start), - vm_map_round_page(start+size), - new_inheritance)); + return vm_map_inherit(map, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + new_inheritance); } /* @@ -357,24 +434,28 @@ vm_inherit( kern_return_t mach_vm_protect( - vm_map_t map, - mach_vm_offset_t start, - mach_vm_size_t size, - boolean_t set_maximum, - vm_prot_t new_protection) + vm_map_t map, + mach_vm_offset_t start, + mach_vm_size_t size, + boolean_t set_maximum, + vm_prot_t new_protection) { if ((map == VM_MAP_NULL) || (start + size < start) || - (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) - return(KERN_INVALID_ARGUMENT); + (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) { + return KERN_INVALID_ARGUMENT; + } - if (size == 0) + if (size == 0) { return KERN_SUCCESS; + } - return(vm_map_protect(map, - vm_map_trunc_page(start), - vm_map_round_page(start+size), - new_protection, - set_maximum)); + return vm_map_protect(map, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + new_protection, + set_maximum); } /* @@ -386,24 +467,28 @@ mach_vm_protect( kern_return_t vm_protect( - vm_map_t map, - vm_offset_t start, - vm_size_t size, - boolean_t set_maximum, - vm_prot_t new_protection) + vm_map_t map, + vm_offset_t start, + vm_size_t size, + boolean_t set_maximum, + vm_prot_t new_protection) { if ((map == VM_MAP_NULL) || (start + size < start) || - (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) - return(KERN_INVALID_ARGUMENT); + (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) { + return KERN_INVALID_ARGUMENT; + } - if (size == 0) + if (size == 0) { return KERN_SUCCESS; + } - return(vm_map_protect(map, - vm_map_trunc_page(start), - vm_map_round_page(start+size), - new_protection, - set_maximum)); + return vm_map_protect(map, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + new_protection, + set_maximum); } /* @@ -413,23 +498,28 @@ vm_protect( */ kern_return_t mach_vm_machine_attribute( - vm_map_t map, - mach_vm_address_t addr, - mach_vm_size_t size, - vm_machine_attribute_t attribute, - vm_machine_attribute_val_t* value) /* IN/OUT */ + vm_map_t map, + mach_vm_address_t addr, + mach_vm_size_t size, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value) /* IN/OUT */ { - if ((map == VM_MAP_NULL) || (addr + size < addr)) - return(KERN_INVALID_ARGUMENT); + if ((map == VM_MAP_NULL) || (addr + size < addr)) { + return KERN_INVALID_ARGUMENT; + } - if (size == 0) + if (size == 0) { return KERN_SUCCESS; + } - return vm_map_machine_attribute(map, - vm_map_trunc_page(addr), - vm_map_round_page(addr+size), - attribute, - value); + return vm_map_machine_attribute( + map, + vm_map_trunc_page(addr, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(addr + size, + VM_MAP_PAGE_MASK(map)), + attribute, + value); } /* @@ -440,23 +530,28 @@ mach_vm_machine_attribute( */ kern_return_t vm_machine_attribute( - vm_map_t map, - vm_address_t addr, - vm_size_t size, - 
vm_machine_attribute_t attribute, - vm_machine_attribute_val_t* value) /* IN/OUT */ + vm_map_t map, + vm_address_t addr, + vm_size_t size, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value) /* IN/OUT */ { - if ((map == VM_MAP_NULL) || (addr + size < addr)) - return(KERN_INVALID_ARGUMENT); + if ((map == VM_MAP_NULL) || (addr + size < addr)) { + return KERN_INVALID_ARGUMENT; + } - if (size == 0) + if (size == 0) { return KERN_SUCCESS; + } - return vm_map_machine_attribute(map, - vm_map_trunc_page(addr), - vm_map_round_page(addr+size), - attribute, - value); + return vm_map_machine_attribute( + map, + vm_map_trunc_page(addr, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(addr + size, + VM_MAP_PAGE_MASK(map)), + attribute, + value); } /* @@ -467,47 +562,49 @@ vm_machine_attribute( * the IPC implementation as part of receiving the reply to this call. * If IPC isn't used, the caller must deal with the vm_map_copy_t object * that gets returned. - * + * * JMM - because of mach_msg_type_number_t, this call is limited to a * single 4GB region at this time. * */ kern_return_t mach_vm_read( - vm_map_t map, - mach_vm_address_t addr, - mach_vm_size_t size, - pointer_t *data, - mach_msg_type_number_t *data_size) + vm_map_t map, + mach_vm_address_t addr, + mach_vm_size_t size, + pointer_t *data, + mach_msg_type_number_t *data_size) { - kern_return_t error; - vm_map_copy_t ipc_address; + kern_return_t error; + vm_map_copy_t ipc_address; - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } - if ((mach_msg_type_number_t) size != size) + if ((mach_msg_type_number_t) size != size) { return KERN_INVALID_ARGUMENT; - + } + error = vm_map_copyin(map, - (vm_map_address_t)addr, - (vm_map_size_t)size, - FALSE, /* src_destroy */ - &ipc_address); + (vm_map_address_t)addr, + (vm_map_size_t)size, + FALSE, /* src_destroy */ + &ipc_address); if (KERN_SUCCESS == error) { *data = (pointer_t) ipc_address; *data_size = (mach_msg_type_number_t) size; assert(*data_size == size); } - return(error); + return error; } /* * vm_read - * Read/copy a range from one address space and return it to the caller. * Limited addressability (same range limits as for the native kernel map). - * + * * It is assumed that the address for the returned memory is selected by * the IPC implementation as part of receiving the reply to this call. 
* If IPC isn't used, the caller must deal with the vm_map_copy_t object @@ -515,19 +612,21 @@ mach_vm_read( */ kern_return_t vm_read( - vm_map_t map, - vm_address_t addr, - vm_size_t size, - pointer_t *data, - mach_msg_type_number_t *data_size) + vm_map_t map, + vm_address_t addr, + vm_size_t size, + pointer_t *data, + mach_msg_type_number_t *data_size) { - kern_return_t error; - vm_map_copy_t ipc_address; + kern_return_t error; + vm_map_copy_t ipc_address; - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } - if (size > (unsigned)(mach_msg_type_number_t) -1) { + mach_msg_type_number_t dsize; + if (os_convert_overflow(size, &dsize)) { /* * The kernel could handle a 64-bit "size" value, but * it could not return the size of the data in "*data_size" @@ -538,20 +637,20 @@ vm_read( } error = vm_map_copyin(map, - (vm_map_address_t)addr, - (vm_map_size_t)size, - FALSE, /* src_destroy */ - &ipc_address); + (vm_map_address_t)addr, + (vm_map_size_t)size, + FALSE, /* src_destroy */ + &ipc_address); if (KERN_SUCCESS == error) { *data = (pointer_t) ipc_address; - *data_size = (mach_msg_type_number_t) size; + *data_size = dsize; assert(*data_size == size); } - return(error); + return error; } -/* +/* * mach_vm_read_list - * Read/copy a list of address ranges from specified map. * @@ -561,37 +660,38 @@ vm_read( */ kern_return_t mach_vm_read_list( - vm_map_t map, - mach_vm_read_entry_t data_list, - natural_t count) + vm_map_t map, + mach_vm_read_entry_t data_list, + natural_t count) { - mach_msg_type_number_t i; - kern_return_t error; - vm_map_copy_t copy; + mach_msg_type_number_t i; + kern_return_t error; + vm_map_copy_t copy; if (map == VM_MAP_NULL || - count > VM_MAP_ENTRY_MAX) - return(KERN_INVALID_ARGUMENT); + count > VM_MAP_ENTRY_MAX) { + return KERN_INVALID_ARGUMENT; + } error = KERN_SUCCESS; - for(i=0; imap, - &map_addr, - copy); + current_task()->map, + &map_addr, + copy); if (KERN_SUCCESS == error) { data_list[i].address = map_addr; continue; @@ -602,10 +702,10 @@ mach_vm_read_list( data_list[i].address = (mach_vm_address_t)0; data_list[i].size = (mach_vm_size_t)0; } - return(error); + return error; } -/* +/* * vm_read_list - * Read/copy a list of address ranges from specified map. * @@ -626,39 +726,40 @@ mach_vm_read_list( kern_return_t vm_read_list( - vm_map_t map, - vm_read_entry_t data_list, - natural_t count) + vm_map_t map, + vm_read_entry_t data_list, + natural_t count) { - mach_msg_type_number_t i; - kern_return_t error; - vm_map_copy_t copy; + mach_msg_type_number_t i; + kern_return_t error; + vm_map_copy_t copy; if (map == VM_MAP_NULL || - count > VM_MAP_ENTRY_MAX) - return(KERN_INVALID_ARGUMENT); + count > VM_MAP_ENTRY_MAX) { + return KERN_INVALID_ARGUMENT; + } error = KERN_SUCCESS; - for(i=0; imap, - &map_addr, - copy); + error = vm_map_copyout(current_task()->map, + &map_addr, + copy); if (KERN_SUCCESS == error) { data_list[i].address = - CAST_DOWN(vm_offset_t, map_addr); + CAST_DOWN(vm_offset_t, map_addr); continue; } vm_map_copy_discard(copy); @@ -667,14 +768,14 @@ vm_read_list( data_list[i].address = (mach_vm_address_t)0; data_list[i].size = (mach_vm_size_t)0; } - return(error); + return error; } /* * mach_vm_read_overwrite - * Overwrite a range of the current map with data from the specified * map/address range. 
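 *
 * A minimal sketch of a hypothetical caller (buf, target_task and
 * remote_addr are assumptions, not part of this change):
 *
 *	char buf[256];
 *	mach_vm_size_t outsize = 0;
 *	kr = mach_vm_read_overwrite(target_task, remote_addr,
 *	    (mach_vm_size_t)sizeof(buf),
 *	    (mach_vm_address_t)(uintptr_t)buf, &outsize);
 *
 * Unlike mach_vm_read(), no vm_map_copy_t comes back; the data lands
 * directly in the caller-supplied buffer.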
- * + * * In making an assumption that the current thread is local, it is * no longer cluster-safe without a fully supportive local proxy * thread/task (but we don't support cluster's anymore so this is moot). @@ -682,39 +783,40 @@ vm_read_list( kern_return_t mach_vm_read_overwrite( - vm_map_t map, - mach_vm_address_t address, - mach_vm_size_t size, - mach_vm_address_t data, - mach_vm_size_t *data_size) + vm_map_t map, + mach_vm_address_t address, + mach_vm_size_t size, + mach_vm_address_t data, + mach_vm_size_t *data_size) { - kern_return_t error; - vm_map_copy_t copy; + kern_return_t error; + vm_map_copy_t copy; - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } error = vm_map_copyin(map, (vm_map_address_t)address, - (vm_map_size_t)size, FALSE, ©); + (vm_map_size_t)size, FALSE, ©); if (KERN_SUCCESS == error) { error = vm_map_copy_overwrite(current_thread()->map, - (vm_map_address_t)data, - copy, FALSE); + (vm_map_address_t)data, + copy, FALSE); if (KERN_SUCCESS == error) { *data_size = size; return error; } vm_map_copy_discard(copy); } - return(error); + return error; } /* * vm_read_overwrite - * Overwrite a range of the current map with data from the specified * map/address range. - * + * * This routine adds the additional limitation that the source and * destination ranges must be describable with vm_address_t values * (i.e. the same size address spaces as the kernel, or at least the @@ -724,32 +826,33 @@ mach_vm_read_overwrite( kern_return_t vm_read_overwrite( - vm_map_t map, - vm_address_t address, - vm_size_t size, - vm_address_t data, - vm_size_t *data_size) + vm_map_t map, + vm_address_t address, + vm_size_t size, + vm_address_t data, + vm_size_t *data_size) { - kern_return_t error; - vm_map_copy_t copy; + kern_return_t error; + vm_map_copy_t copy; - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } error = vm_map_copyin(map, (vm_map_address_t)address, - (vm_map_size_t)size, FALSE, ©); + (vm_map_size_t)size, FALSE, ©); if (KERN_SUCCESS == error) { error = vm_map_copy_overwrite(current_thread()->map, - (vm_map_address_t)data, - copy, FALSE); + (vm_map_address_t)data, + copy, FALSE); if (KERN_SUCCESS == error) { *data_size = size; return error; } vm_map_copy_discard(copy); } - return(error); + return error; } @@ -760,16 +863,17 @@ vm_read_overwrite( */ kern_return_t mach_vm_write( - vm_map_t map, - mach_vm_address_t address, - pointer_t data, - __unused mach_msg_type_number_t size) + vm_map_t map, + mach_vm_address_t address, + pointer_t data, + __unused mach_msg_type_number_t size) { - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return KERN_INVALID_ARGUMENT; + } return vm_map_copy_overwrite(map, (vm_map_address_t)address, - (vm_map_copy_t) data, FALSE /* interruptible XXX */); + (vm_map_copy_t) data, FALSE /* interruptible XXX */); } /* @@ -784,16 +888,17 @@ mach_vm_write( */ kern_return_t vm_write( - vm_map_t map, - vm_address_t address, - pointer_t data, - __unused mach_msg_type_number_t size) + vm_map_t map, + vm_address_t address, + pointer_t data, + __unused mach_msg_type_number_t size) { - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return KERN_INVALID_ARGUMENT; + } return vm_map_copy_overwrite(map, (vm_map_address_t)address, - (vm_map_copy_t) data, FALSE /* interruptible XXX */); + (vm_map_copy_t) data, FALSE /* interruptible XXX */); } /* @@ -804,54 +909,58 @@ vm_write( */ kern_return_t mach_vm_copy( - vm_map_t map, - 
mach_vm_address_t source_address, - mach_vm_size_t size, - mach_vm_address_t dest_address) + vm_map_t map, + mach_vm_address_t source_address, + mach_vm_size_t size, + mach_vm_address_t dest_address) { vm_map_copy_t copy; kern_return_t kr; - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return KERN_INVALID_ARGUMENT; + } kr = vm_map_copyin(map, (vm_map_address_t)source_address, - (vm_map_size_t)size, FALSE, ©); + (vm_map_size_t)size, FALSE, ©); if (KERN_SUCCESS == kr) { kr = vm_map_copy_overwrite(map, - (vm_map_address_t)dest_address, - copy, FALSE /* interruptible XXX */); + (vm_map_address_t)dest_address, + copy, FALSE /* interruptible XXX */); - if (KERN_SUCCESS != kr) + if (KERN_SUCCESS != kr) { vm_map_copy_discard(copy); + } } return kr; } kern_return_t vm_copy( - vm_map_t map, - vm_address_t source_address, - vm_size_t size, - vm_address_t dest_address) + vm_map_t map, + vm_address_t source_address, + vm_size_t size, + vm_address_t dest_address) { vm_map_copy_t copy; kern_return_t kr; - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return KERN_INVALID_ARGUMENT; + } kr = vm_map_copyin(map, (vm_map_address_t)source_address, - (vm_map_size_t)size, FALSE, ©); + (vm_map_size_t)size, FALSE, ©); if (KERN_SUCCESS == kr) { kr = vm_map_copy_overwrite(map, - (vm_map_address_t)dest_address, - copy, FALSE /* interruptible XXX */); + (vm_map_address_t)dest_address, + copy, FALSE /* interruptible XXX */); - if (KERN_SUCCESS != kr) + if (KERN_SUCCESS != kr) { vm_map_copy_discard(copy); + } } return kr; } @@ -868,51 +977,120 @@ vm_copy( * */ kern_return_t -mach_vm_map( - vm_map_t target_map, - mach_vm_offset_t *address, - mach_vm_size_t initial_size, - mach_vm_offset_t mask, - int flags, - ipc_port_t port, - vm_object_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) +mach_vm_map_external( + vm_map_t target_map, + mach_vm_offset_t *address, + mach_vm_size_t initial_size, + mach_vm_offset_t mask, + int flags, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) +{ + vm_tag_t tag; + + VM_GET_FLAGS_ALIAS(flags, tag); + return mach_vm_map_kernel(target_map, address, initial_size, mask, + flags, VM_MAP_KERNEL_FLAGS_NONE, tag, + port, offset, copy, + cur_protection, max_protection, + inheritance); +} + +kern_return_t +mach_vm_map_kernel( + vm_map_t target_map, + mach_vm_offset_t *address, + mach_vm_size_t initial_size, + mach_vm_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { + kern_return_t kr; + vm_map_offset_t vmmaddr; + + vmmaddr = (vm_map_offset_t) *address; + /* filter out any kernel-only flags */ - if (flags & ~VM_FLAGS_USER_MAP) + if (flags & ~VM_FLAGS_USER_MAP) { return KERN_INVALID_ARGUMENT; + } + + kr = vm_map_enter_mem_object(target_map, + &vmmaddr, + initial_size, + mask, + flags, + vmk_flags, + tag, + port, + offset, + copy, + cur_protection, + max_protection, + inheritance); + +#if KASAN + if (kr == KERN_SUCCESS && target_map->pmap == kernel_pmap) { + kasan_notify_address(vmmaddr, initial_size); + } +#endif - return vm_map_enter_mem_object(target_map, - address, - initial_size, - mask, - flags, - port, - offset, - copy, - cur_protection, - max_protection, - inheritance); + *address = vmmaddr; + return kr; } /* legacy interface */ 
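/*
 * A hedged sketch of mapping a memory-entry port with mach_vm_map()
 * (hypothetical user-space caller; mem_entry -- e.g. from
 * mach_make_memory_entry_64(), below -- and size are assumptions).
 * vm_map_64(), next, is the legacy wrapper over the same path:
 *
 *	mach_vm_address_t addr = 0;
 *	kr = mach_vm_map(mach_task_self(), &addr, size, 0,
 *	    VM_FLAGS_ANYWHERE, mem_entry, 0, FALSE,
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
 */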
kern_return_t -vm_map_64( - vm_map_t target_map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - int flags, - ipc_port_t port, - vm_object_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) +vm_map_64_external( + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) +{ + vm_tag_t tag; + + VM_GET_FLAGS_ALIAS(flags, tag); + return vm_map_64_kernel(target_map, address, size, mask, + flags, VM_MAP_KERNEL_FLAGS_NONE, + tag, port, offset, copy, + cur_protection, max_protection, + inheritance); +} + +kern_return_t +vm_map_64_kernel( + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { mach_vm_address_t map_addr; mach_vm_size_t map_size; @@ -923,27 +1101,53 @@ vm_map_64( map_size = (mach_vm_size_t)size; map_mask = (mach_vm_offset_t)mask; - kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags, - port, offset, copy, - cur_protection, max_protection, inheritance); + kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask, + flags, vmk_flags, tag, + port, offset, copy, + cur_protection, max_protection, inheritance); *address = CAST_DOWN(vm_offset_t, map_addr); return kr; } /* temporary, until world build */ kern_return_t -vm_map( - vm_map_t target_map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - int flags, - ipc_port_t port, - vm_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) +vm_map_external( + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + ipc_port_t port, + vm_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) +{ + vm_tag_t tag; + + VM_GET_FLAGS_ALIAS(flags, tag); + return vm_map_kernel(target_map, address, size, mask, + flags, VM_MAP_KERNEL_FLAGS_NONE, tag, + port, offset, copy, + cur_protection, max_protection, inheritance); +} + +kern_return_t +vm_map_kernel( + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { mach_vm_address_t map_addr; mach_vm_size_t map_size; @@ -956,9 +1160,10 @@ vm_map( map_mask = (mach_vm_offset_t)mask; obj_offset = (vm_object_offset_t)offset; - kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags, - port, obj_offset, copy, - cur_protection, max_protection, inheritance); + kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask, + flags, vmk_flags, tag, + port, obj_offset, copy, + cur_protection, max_protection, inheritance); *address = CAST_DOWN(vm_offset_t, map_addr); return kr; } @@ -970,44 +1175,69 @@ vm_map( * over top of itself (with altered permissions and/or * as an in-place copy of itself). 
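 *
 * A hedged user-space sketch (src_task, src_addr and size are
 * assumptions); the protections actually granted come back through
 * the cur/max out-parameters:
 *
 *	mach_vm_address_t target = 0;
 *	vm_prot_t cur = VM_PROT_NONE, max = VM_PROT_NONE;
 *	kr = mach_vm_remap(mach_task_self(), &target, size, 0,
 *	    VM_FLAGS_ANYWHERE, src_task, src_addr,
 *	    TRUE,	// copy: snapshot rather than share
 *	    &cur, &max, VM_INHERIT_NONE);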
*/ +kern_return_t +mach_vm_remap_external( + vm_map_t target_map, + mach_vm_offset_t *address, + mach_vm_size_t size, + mach_vm_offset_t mask, + int flags, + vm_map_t src_map, + mach_vm_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance) +{ + vm_tag_t tag; + VM_GET_FLAGS_ALIAS(flags, tag); + + return mach_vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map, memory_address, + copy, cur_protection, max_protection, inheritance); +} kern_return_t -mach_vm_remap( - vm_map_t target_map, - mach_vm_offset_t *address, - mach_vm_size_t size, - mach_vm_offset_t mask, - int flags, - vm_map_t src_map, - mach_vm_offset_t memory_address, - boolean_t copy, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance) +mach_vm_remap_kernel( + vm_map_t target_map, + mach_vm_offset_t *address, + mach_vm_size_t size, + mach_vm_offset_t mask, + int flags, + vm_tag_t tag, + vm_map_t src_map, + mach_vm_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance) { - vm_map_offset_t map_addr; - kern_return_t kr; + vm_map_offset_t map_addr; + kern_return_t kr; - if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) + if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) { return KERN_INVALID_ARGUMENT; + } /* filter out any kernel-only flags */ - if (flags & ~VM_FLAGS_USER_REMAP) + if (flags & ~VM_FLAGS_USER_REMAP) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_offset_t)*address; kr = vm_map_remap(target_map, - &map_addr, - size, - mask, - flags, - src_map, - memory_address, - copy, - cur_protection, - max_protection, - inheritance); + &map_addr, + size, + mask, + flags, + VM_MAP_KERNEL_FLAGS_NONE, + tag, + src_map, + memory_address, + copy, + cur_protection, + max_protection, + inheritance); *address = map_addr; return kr; } @@ -1024,42 +1254,68 @@ mach_vm_remap( * kernel context). 
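 *
 * With this change the allocation tag is explicit on the kernel path;
 * a sketch of an in-kernel caller (target_map, src_map, src_addr and
 * size are assumptions; the signature is the vm_remap_kernel() added
 * below):
 *
 *	vm_offset_t target = 0;
 *	vm_prot_t cur = VM_PROT_NONE, max = VM_PROT_NONE;
 *	kr = vm_remap_kernel(target_map, &target, size, 0,
 *	    VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK, src_map, src_addr,
 *	    FALSE, &cur, &max, VM_INHERIT_NONE);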
*/ kern_return_t -vm_remap( - vm_map_t target_map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - int flags, - vm_map_t src_map, - vm_offset_t memory_address, - boolean_t copy, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance) +vm_remap_external( + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + vm_map_t src_map, + vm_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance) +{ + vm_tag_t tag; + VM_GET_FLAGS_ALIAS(flags, tag); + + return vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map, + memory_address, copy, cur_protection, max_protection, inheritance); +} + +kern_return_t +vm_remap_kernel( + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + vm_tag_t tag, + vm_map_t src_map, + vm_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance) { - vm_map_offset_t map_addr; - kern_return_t kr; + vm_map_offset_t map_addr; + kern_return_t kr; - if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) + if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) { return KERN_INVALID_ARGUMENT; + } /* filter out any kernel-only flags */ - if (flags & ~VM_FLAGS_USER_REMAP) + if (flags & ~VM_FLAGS_USER_REMAP) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_offset_t)*address; kr = vm_map_remap(target_map, - &map_addr, - size, - mask, - flags, - src_map, - memory_address, - copy, - cur_protection, - max_protection, - inheritance); + &map_addr, + size, + mask, + flags, + VM_MAP_KERNEL_FLAGS_NONE, + tag, + src_map, + memory_address, + copy, + cur_protection, + max_protection, + inheritance); *address = CAST_DOWN(vm_offset_t, map_addr); return kr; } @@ -1078,32 +1334,56 @@ vm_remap( * [ To unwire the pages, specify VM_PROT_NONE. 
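 *
 *   A hedged sketch, assuming host_priv is the privileged host port
 *   and addr/size describe an existing allocation; with this change
 *   the MIG-visible routine becomes mach_vm_wire_external(), which
 *   tags user wirings VM_KERN_MEMORY_MLOCK:
 *
 *	kr = mach_vm_wire(host_priv, mach_task_self(), addr, size,
 *	    VM_PROT_READ);	// wire
 *	kr = mach_vm_wire(host_priv, mach_task_self(), addr, size,
 *	    VM_PROT_NONE);	// unwire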
] */ kern_return_t -mach_vm_wire( - host_priv_t host_priv, - vm_map_t map, - mach_vm_offset_t start, - mach_vm_size_t size, - vm_prot_t access) +mach_vm_wire_external( + host_priv_t host_priv, + vm_map_t map, + mach_vm_offset_t start, + mach_vm_size_t size, + vm_prot_t access) { - kern_return_t rc; + return mach_vm_wire_kernel(host_priv, map, start, size, access, VM_KERN_MEMORY_MLOCK); +} + +kern_return_t +mach_vm_wire_kernel( + host_priv_t host_priv, + vm_map_t map, + mach_vm_offset_t start, + mach_vm_size_t size, + vm_prot_t access, + vm_tag_t tag) +{ + kern_return_t rc; - if (host_priv == HOST_PRIV_NULL) + if (host_priv == HOST_PRIV_NULL) { return KERN_INVALID_HOST; + } assert(host_priv == &realhost); - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return KERN_INVALID_TASK; + } - if (access & ~VM_PROT_ALL || (start + size < start)) + if (access & ~VM_PROT_ALL || (start + size < start)) { return KERN_INVALID_ARGUMENT; + } if (access != VM_PROT_NONE) { - rc = vm_map_wire(map, vm_map_trunc_page(start), - vm_map_round_page(start+size), access, TRUE); + rc = vm_map_wire_kernel(map, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + access, tag, + TRUE); } else { - rc = vm_map_unwire(map, vm_map_trunc_page(start), - vm_map_round_page(start+size), TRUE); + rc = vm_map_unwire(map, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + TRUE); } return rc; } @@ -1118,33 +1398,45 @@ mach_vm_wire( */ kern_return_t vm_wire( - host_priv_t host_priv, - register vm_map_t map, - vm_offset_t start, - vm_size_t size, - vm_prot_t access) + host_priv_t host_priv, + vm_map_t map, + vm_offset_t start, + vm_size_t size, + vm_prot_t access) { - kern_return_t rc; + kern_return_t rc; - if (host_priv == HOST_PRIV_NULL) + if (host_priv == HOST_PRIV_NULL) { return KERN_INVALID_HOST; + } assert(host_priv == &realhost); - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return KERN_INVALID_TASK; + } - if ((access & ~VM_PROT_ALL) || (start + size < start)) + if ((access & ~VM_PROT_ALL) || (start + size < start)) { return KERN_INVALID_ARGUMENT; + } if (size == 0) { rc = KERN_SUCCESS; } else if (access != VM_PROT_NONE) { - rc = vm_map_wire(map, vm_map_trunc_page(start), - vm_map_round_page(start+size), access, TRUE); + rc = vm_map_wire_kernel(map, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + access, VM_KERN_MEMORY_OSFMK, + TRUE); } else { - rc = vm_map_unwire(map, vm_map_trunc_page(start), - vm_map_round_page(start+size), TRUE); + rc = vm_map_unwire(map, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + TRUE); } return rc; } @@ -1182,19 +1474,19 @@ vm_wire( kern_return_t mach_vm_msync( - vm_map_t map, - mach_vm_address_t address, - mach_vm_size_t size, - vm_sync_t sync_flags) + vm_map_t map, + mach_vm_address_t address, + mach_vm_size_t size, + vm_sync_t sync_flags) { - - if (map == VM_MAP_NULL) - return(KERN_INVALID_TASK); + if (map == VM_MAP_NULL) { + return KERN_INVALID_TASK; + } return vm_map_msync(map, (vm_map_address_t)address, - (vm_map_size_t)size, sync_flags); + (vm_map_size_t)size, sync_flags); } - + /* * vm_msync * @@ -1231,22 +1523,54 @@ mach_vm_msync( kern_return_t vm_msync( - vm_map_t map, - vm_address_t address, - vm_size_t size, - vm_sync_t sync_flags) + vm_map_t map, + vm_address_t address, + vm_size_t size, + vm_sync_t sync_flags) { - - if 
(map == VM_MAP_NULL) - return(KERN_INVALID_TASK); + if (map == VM_MAP_NULL) { + return KERN_INVALID_TASK; + } return vm_map_msync(map, (vm_map_address_t)address, - (vm_map_size_t)size, sync_flags); + (vm_map_size_t)size, sync_flags); } +int +vm_toggle_entry_reuse(int toggle, int *old_value) +{ + vm_map_t map = current_map(); + + assert(!map->is_nested_map); + if (toggle == VM_TOGGLE_GETVALUE && old_value != NULL) { + *old_value = map->disable_vmentry_reuse; + } else if (toggle == VM_TOGGLE_SET) { + vm_map_entry_t map_to_entry; + + vm_map_lock(map); + vm_map_disable_hole_optimization(map); + map->disable_vmentry_reuse = TRUE; + __IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map)); + if (map->first_free == map_to_entry) { + map->highest_entry_end = vm_map_min(map); + } else { + map->highest_entry_end = map->first_free->vme_end; + } + vm_map_unlock(map); + } else if (toggle == VM_TOGGLE_CLEAR) { + vm_map_lock(map); + map->disable_vmentry_reuse = FALSE; + vm_map_unlock(map); + } else { + return KERN_INVALID_ARGUMENT; + } + + return KERN_SUCCESS; +} + /* - * mach_vm_behavior_set + * mach_vm_behavior_set * * Sets the paging behavior attribute for the specified range * in the specified map. @@ -1254,25 +1578,47 @@ vm_msync( * This routine will fail with KERN_INVALID_ADDRESS if any address * in [start,start+size) is not a valid allocated memory region. */ -kern_return_t +kern_return_t mach_vm_behavior_set( - vm_map_t map, - mach_vm_offset_t start, - mach_vm_size_t size, - vm_behavior_t new_behavior) + vm_map_t map, + mach_vm_offset_t start, + mach_vm_size_t size, + vm_behavior_t new_behavior) { - if ((map == VM_MAP_NULL) || (start + size < start)) - return(KERN_INVALID_ARGUMENT); + vm_map_offset_t align_mask; + + if ((map == VM_MAP_NULL) || (start + size < start)) { + return KERN_INVALID_ARGUMENT; + } - if (size == 0) + if (size == 0) { return KERN_SUCCESS; + } + + switch (new_behavior) { + case VM_BEHAVIOR_REUSABLE: + case VM_BEHAVIOR_REUSE: + case VM_BEHAVIOR_CAN_REUSE: + /* + * Align to the hardware page size, to allow + * malloc() to maximize the amount of re-usability, + * even on systems with larger software page size. + */ + align_mask = PAGE_MASK; + break; + default: + align_mask = VM_MAP_PAGE_MASK(map); + break; + } - return(vm_map_behavior_set(map, vm_map_trunc_page(start), - vm_map_round_page(start+size), new_behavior)); + return vm_map_behavior_set(map, + vm_map_trunc_page(start, align_mask), + vm_map_round_page(start + size, align_mask), + new_behavior); } /* - * vm_behavior_set + * vm_behavior_set * * Sets the paging behavior attribute for the specified range * in the specified map. @@ -1284,21 +1630,21 @@ mach_vm_behavior_set( * use of vm_offset_t (if the map provided is larger than the * kernel's). 
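 *
 * A hedged user-space sketch (addr and len are assumptions); with this
 * change the routine simply forwards to mach_vm_behavior_set(), which
 * aligns the reuse hints to the hardware page size:
 *
 *	kr = vm_behavior_set(mach_task_self(), addr, len,
 *	    VM_BEHAVIOR_REUSABLE);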
*/ -kern_return_t +kern_return_t vm_behavior_set( - vm_map_t map, - vm_offset_t start, - vm_size_t size, - vm_behavior_t new_behavior) + vm_map_t map, + vm_offset_t start, + vm_size_t size, + vm_behavior_t new_behavior) { - if ((map == VM_MAP_NULL) || (start + size < start)) - return(KERN_INVALID_ARGUMENT); - - if (size == 0) - return KERN_SUCCESS; + if (start + size < start) { + return KERN_INVALID_ARGUMENT; + } - return(vm_map_behavior_set(map, vm_map_trunc_page(start), - vm_map_round_page(start+size), new_behavior)); + return mach_vm_behavior_set(map, + (mach_vm_offset_t) start, + (mach_vm_size_t) size, + new_behavior); } /* @@ -1317,32 +1663,34 @@ vm_behavior_set( kern_return_t mach_vm_region( - vm_map_t map, - mach_vm_offset_t *address, /* IN/OUT */ - mach_vm_size_t *size, /* OUT */ - vm_region_flavor_t flavor, /* IN */ - vm_region_info_t info, /* OUT */ - mach_msg_type_number_t *count, /* IN/OUT */ - mach_port_t *object_name) /* OUT */ + vm_map_t map, + mach_vm_offset_t *address, /* IN/OUT */ + mach_vm_size_t *size, /* OUT */ + vm_region_flavor_t flavor, /* IN */ + vm_region_info_t info, /* OUT */ + mach_msg_type_number_t *count, /* IN/OUT */ + mach_port_t *object_name) /* OUT */ { - vm_map_offset_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_offset_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_offset_t)*address; map_size = (vm_map_size_t)*size; /* legacy conversion */ - if (VM_REGION_BASIC_INFO == flavor) + if (VM_REGION_BASIC_INFO == flavor) { flavor = VM_REGION_BASIC_INFO_64; + } kr = vm_map_region(map, - &map_addr, &map_size, - flavor, info, count, - object_name); + &map_addr, &map_size, + flavor, info, count, + object_name); *address = map_addr; *size = map_size; @@ -1365,71 +1713,76 @@ mach_vm_region( kern_return_t vm_region_64( - vm_map_t map, - vm_offset_t *address, /* IN/OUT */ - vm_size_t *size, /* OUT */ - vm_region_flavor_t flavor, /* IN */ - vm_region_info_t info, /* OUT */ - mach_msg_type_number_t *count, /* IN/OUT */ - mach_port_t *object_name) /* OUT */ + vm_map_t map, + vm_offset_t *address, /* IN/OUT */ + vm_size_t *size, /* OUT */ + vm_region_flavor_t flavor, /* IN */ + vm_region_info_t info, /* OUT */ + mach_msg_type_number_t *count, /* IN/OUT */ + mach_port_t *object_name) /* OUT */ { - vm_map_offset_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_offset_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_offset_t)*address; map_size = (vm_map_size_t)*size; /* legacy conversion */ - if (VM_REGION_BASIC_INFO == flavor) + if (VM_REGION_BASIC_INFO == flavor) { flavor = VM_REGION_BASIC_INFO_64; + } kr = vm_map_region(map, - &map_addr, &map_size, - flavor, info, count, - object_name); + &map_addr, &map_size, + flavor, info, count, + object_name); *address = CAST_DOWN(vm_offset_t, map_addr); *size = CAST_DOWN(vm_size_t, map_size); - if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) + if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { return KERN_INVALID_ADDRESS; + } return kr; } kern_return_t vm_region( - vm_map_t map, - vm_address_t *address, /* IN/OUT */ - vm_size_t *size, /* OUT */ - vm_region_flavor_t flavor, /* IN */ - vm_region_info_t info, /* OUT */ - mach_msg_type_number_t *count, /* IN/OUT */ - mach_port_t *object_name) /* OUT */ + vm_map_t map, + vm_address_t 
*address, /* IN/OUT */ + vm_size_t *size, /* OUT */ + vm_region_flavor_t flavor, /* IN */ + vm_region_info_t info, /* OUT */ + mach_msg_type_number_t *count, /* IN/OUT */ + mach_port_t *object_name) /* OUT */ { - vm_map_address_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_address_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_address_t)*address; map_size = (vm_map_size_t)*size; kr = vm_map_region(map, - &map_addr, &map_size, - flavor, info, count, - object_name); + &map_addr, &map_size, + flavor, info, count, + object_name); *address = CAST_DOWN(vm_address_t, map_addr); *size = CAST_DOWN(vm_size_t, map_size); - if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) + if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { return KERN_INVALID_ADDRESS; + } return kr; } @@ -1440,30 +1793,31 @@ vm_region( */ kern_return_t mach_vm_region_recurse( - vm_map_t map, - mach_vm_address_t *address, - mach_vm_size_t *size, - uint32_t *depth, - vm_region_recurse_info_t info, - mach_msg_type_number_t *infoCnt) + vm_map_t map, + mach_vm_address_t *address, + mach_vm_size_t *size, + uint32_t *depth, + vm_region_recurse_info_t info, + mach_msg_type_number_t *infoCnt) { - vm_map_address_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_address_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_address_t)*address; map_size = (vm_map_size_t)*size; kr = vm_map_region_recurse_64( - map, - &map_addr, - &map_size, - depth, - (vm_region_submap_info_64_t)info, - infoCnt); + map, + &map_addr, + &map_size, + depth, + (vm_region_submap_info_64_t)info, + infoCnt); *address = map_addr; *size = map_size; @@ -1477,125 +1831,141 @@ mach_vm_region_recurse( */ kern_return_t vm_region_recurse_64( - vm_map_t map, - vm_address_t *address, - vm_size_t *size, - uint32_t *depth, - vm_region_recurse_info_64_t info, - mach_msg_type_number_t *infoCnt) + vm_map_t map, + vm_address_t *address, + vm_size_t *size, + uint32_t *depth, + vm_region_recurse_info_64_t info, + mach_msg_type_number_t *infoCnt) { - vm_map_address_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_address_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_address_t)*address; map_size = (vm_map_size_t)*size; kr = vm_map_region_recurse_64( - map, - &map_addr, - &map_size, - depth, - (vm_region_submap_info_64_t)info, - infoCnt); + map, + &map_addr, + &map_size, + depth, + (vm_region_submap_info_64_t)info, + infoCnt); *address = CAST_DOWN(vm_address_t, map_addr); *size = CAST_DOWN(vm_size_t, map_size); - if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) + if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { return KERN_INVALID_ADDRESS; + } return kr; } kern_return_t vm_region_recurse( - vm_map_t map, - vm_offset_t *address, /* IN/OUT */ - vm_size_t *size, /* OUT */ - natural_t *depth, /* IN/OUT */ - vm_region_recurse_info_t info32, /* IN/OUT */ - mach_msg_type_number_t *infoCnt) /* IN/OUT */ + vm_map_t map, + vm_offset_t *address, /* IN/OUT */ + vm_size_t *size, /* OUT */ + natural_t *depth, /* IN/OUT */ + vm_region_recurse_info_t info32, /* IN/OUT */ + mach_msg_type_number_t *infoCnt) /* IN/OUT */ { 
vm_region_submap_info_data_64_t info64; vm_region_submap_info_t info; - vm_map_address_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_address_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) + if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) { return KERN_INVALID_ARGUMENT; + } + - map_addr = (vm_map_address_t)*address; map_size = (vm_map_size_t)*size; info = (vm_region_submap_info_t)info32; *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64; - kr = vm_map_region_recurse_64(map, &map_addr,&map_size, - depth, &info64, infoCnt); + kr = vm_map_region_recurse_64(map, &map_addr, &map_size, + depth, &info64, infoCnt); info->protection = info64.protection; info->max_protection = info64.max_protection; info->inheritance = info64.inheritance; info->offset = (uint32_t)info64.offset; /* trouble-maker */ - info->user_tag = info64.user_tag; - info->pages_resident = info64.pages_resident; - info->pages_shared_now_private = info64.pages_shared_now_private; - info->pages_swapped_out = info64.pages_swapped_out; - info->pages_dirtied = info64.pages_dirtied; - info->ref_count = info64.ref_count; - info->shadow_depth = info64.shadow_depth; - info->external_pager = info64.external_pager; - info->share_mode = info64.share_mode; + info->user_tag = info64.user_tag; + info->pages_resident = info64.pages_resident; + info->pages_shared_now_private = info64.pages_shared_now_private; + info->pages_swapped_out = info64.pages_swapped_out; + info->pages_dirtied = info64.pages_dirtied; + info->ref_count = info64.ref_count; + info->shadow_depth = info64.shadow_depth; + info->external_pager = info64.external_pager; + info->share_mode = info64.share_mode; info->is_submap = info64.is_submap; info->behavior = info64.behavior; info->object_id = info64.object_id; - info->user_wired_count = info64.user_wired_count; + info->user_wired_count = info64.user_wired_count; *address = CAST_DOWN(vm_address_t, map_addr); *size = CAST_DOWN(vm_size_t, map_size); *infoCnt = VM_REGION_SUBMAP_INFO_COUNT; - if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) + if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { return KERN_INVALID_ADDRESS; + } return kr; } kern_return_t mach_vm_purgable_control( - vm_map_t map, - mach_vm_offset_t address, - vm_purgable_t control, - int *state) + vm_map_t map, + mach_vm_offset_t address, + vm_purgable_t control, + int *state) { - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } + + if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) { + /* not allowed from user-space */ + return KERN_INVALID_ARGUMENT; + } return vm_map_purgable_control(map, - vm_map_trunc_page(address), - control, - state); + vm_map_trunc_page(address, PAGE_MASK), + control, + state); } kern_return_t vm_purgable_control( - vm_map_t map, - vm_offset_t address, - vm_purgable_t control, - int *state) + vm_map_t map, + vm_offset_t address, + vm_purgable_t control, + int *state) { - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } + + if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) { + /* not allowed from user-space */ + return KERN_INVALID_ARGUMENT; + } return vm_map_purgable_control(map, - vm_map_trunc_page(address), - control, - state); + vm_map_trunc_page(address, PAGE_MASK), + control, + state); } - + /* * Ordinarily, the right to allocate CPM is restricted @@ -1603,7 +1973,7 @@ vm_purgable_control( * to the host priv port). 
Set this variable to zero if * you want to let any application allocate CPM. */ -unsigned int vm_allocate_cpm_privileged = 0; +unsigned int vm_allocate_cpm_privileged = 0; /* * Allocate memory in the specified map, with the caveat that @@ -1617,29 +1987,31 @@ unsigned int vm_allocate_cpm_privileged = 0; */ kern_return_t vm_allocate_cpm( - host_priv_t host_priv, - vm_map_t map, - vm_address_t *addr, - vm_size_t size, - int flags) + host_priv_t host_priv, + vm_map_t map, + vm_address_t *addr, + vm_size_t size, + int flags) { - vm_map_address_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_address_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv) + if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv) { return KERN_INVALID_HOST; + } - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_address_t)*addr; map_size = (vm_map_size_t)size; kr = vm_map_enter_cpm(map, - &map_addr, - map_size, - flags); + &map_addr, + map_size, + flags); *addr = CAST_DOWN(vm_address_t, map_addr); return kr; @@ -1648,116 +2020,256 @@ vm_allocate_cpm( kern_return_t mach_vm_page_query( - vm_map_t map, - mach_vm_offset_t offset, - int *disposition, - int *ref_count) + vm_map_t map, + mach_vm_offset_t offset, + int *disposition, + int *ref_count) { - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } - return vm_map_page_query_internal(map, - vm_map_trunc_page(offset), - disposition, ref_count); + return vm_map_page_query_internal( + map, + vm_map_trunc_page(offset, PAGE_MASK), + disposition, ref_count); } kern_return_t vm_map_page_query( - vm_map_t map, - vm_offset_t offset, - int *disposition, - int *ref_count) + vm_map_t map, + vm_offset_t offset, + int *disposition, + int *ref_count) { - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } - return vm_map_page_query_internal(map, - vm_map_trunc_page(offset), - disposition, ref_count); + return vm_map_page_query_internal( + map, + vm_map_trunc_page(offset, PAGE_MASK), + disposition, ref_count); } kern_return_t -mach_vm_page_info( - vm_map_t map, - mach_vm_address_t address, - vm_page_info_flavor_t flavor, - vm_page_info_t info, - mach_msg_type_number_t *count) +mach_vm_page_range_query( + vm_map_t map, + mach_vm_offset_t address, + mach_vm_size_t size, + mach_vm_address_t dispositions_addr, + mach_vm_size_t *dispositions_count) { - kern_return_t kr; - - if (map == VM_MAP_NULL) { + kern_return_t kr = KERN_SUCCESS; + int num_pages = 0, i = 0; + mach_vm_size_t curr_sz = 0, copy_sz = 0; + mach_vm_size_t disp_buf_req_size = 0, disp_buf_total_size = 0; + mach_msg_type_number_t count = 0; + + void *info = NULL; + void *local_disp = NULL;; + vm_map_size_t info_size = 0, local_disp_size = 0; + mach_vm_offset_t start = 0, end = 0; + + if (map == VM_MAP_NULL || dispositions_count == NULL) { return KERN_INVALID_ARGUMENT; } - kr = vm_map_page_info(map, address, flavor, info, count); - return kr; -} + disp_buf_req_size = (*dispositions_count * sizeof(int)); + start = mach_vm_trunc_page(address); + end = mach_vm_round_page(address + size); -/* map a (whole) upl into an address space */ -kern_return_t -vm_upl_map( - vm_map_t map, - upl_t upl, - vm_address_t *dst_addr) -{ - vm_map_offset_t map_addr; - kern_return_t kr; + if (end < start) { + return KERN_INVALID_ARGUMENT; + } - if (VM_MAP_NULL == map) + if ((end - start) < size) { + /* + * Aligned size is less than 
unaligned size. + */ return KERN_INVALID_ARGUMENT; + } - kr = vm_map_enter_upl(map, upl, &map_addr); - *dst_addr = CAST_DOWN(vm_address_t, map_addr); - return kr; -} + if (disp_buf_req_size == 0 || (end == start)) { + return KERN_SUCCESS; + } -kern_return_t -vm_upl_unmap( - vm_map_t map, - upl_t upl) -{ - if (VM_MAP_NULL == map) - return KERN_INVALID_ARGUMENT; + /* + * For large requests, we will go through them + * MAX_PAGE_RANGE_QUERY chunk at a time. + */ - return (vm_map_remove_upl(map, upl)); -} + curr_sz = MIN(end - start, MAX_PAGE_RANGE_QUERY); + num_pages = (int) (curr_sz >> PAGE_SHIFT); -/* Retrieve a upl for an object underlying an address range in a map */ + info_size = num_pages * sizeof(vm_page_info_basic_data_t); + info = kalloc(info_size); + + if (info == NULL) { + return KERN_RESOURCE_SHORTAGE; + } + + local_disp_size = num_pages * sizeof(int); + local_disp = kalloc(local_disp_size); + + if (local_disp == NULL) { + kfree(info, info_size); + info = NULL; + return KERN_RESOURCE_SHORTAGE; + } + + while (size) { + count = VM_PAGE_INFO_BASIC_COUNT; + kr = vm_map_page_range_info_internal( + map, + start, + mach_vm_round_page(start + curr_sz), + VM_PAGE_INFO_BASIC, + (vm_page_info_t) info, + &count); + + assert(kr == KERN_SUCCESS); + + for (i = 0; i < num_pages; i++) { + ((int*)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition; + } + + copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int) /* an int per page */); + kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz); + + start += curr_sz; + disp_buf_req_size -= copy_sz; + disp_buf_total_size += copy_sz; + + if (kr != 0) { + break; + } + + if ((disp_buf_req_size == 0) || (curr_sz >= size)) { + /* + * We might have inspected the full range OR + * more than it esp. if the user passed in + * non-page aligned start/size and/or if we + * descended into a submap. We are done here. 
+ */ + + size = 0; + } else { + dispositions_addr += copy_sz; + + size -= curr_sz; + + curr_sz = MIN(mach_vm_round_page(size), MAX_PAGE_RANGE_QUERY); + num_pages = (int)(curr_sz >> PAGE_SHIFT); + } + } + + *dispositions_count = disp_buf_total_size / sizeof(int); + + kfree(local_disp, local_disp_size); + local_disp = NULL; + + kfree(info, info_size); + info = NULL; + + return kr; +} + +kern_return_t +mach_vm_page_info( + vm_map_t map, + mach_vm_address_t address, + vm_page_info_flavor_t flavor, + vm_page_info_t info, + mach_msg_type_number_t *count) +{ + kern_return_t kr; + + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } + + kr = vm_map_page_info(map, address, flavor, info, count); + return kr; +} + +/* map a (whole) upl into an address space */ +kern_return_t +vm_upl_map( + vm_map_t map, + upl_t upl, + vm_address_t *dst_addr) +{ + vm_map_offset_t map_addr; + kern_return_t kr; + + if (VM_MAP_NULL == map) { + return KERN_INVALID_ARGUMENT; + } + + kr = vm_map_enter_upl(map, upl, &map_addr); + *dst_addr = CAST_DOWN(vm_address_t, map_addr); + return kr; +} + +kern_return_t +vm_upl_unmap( + vm_map_t map, + upl_t upl) +{ + if (VM_MAP_NULL == map) { + return KERN_INVALID_ARGUMENT; + } + + return vm_map_remove_upl(map, upl); +} + +/* Retrieve a upl for an object underlying an address range in a map */ kern_return_t vm_map_get_upl( - vm_map_t map, - vm_map_offset_t map_offset, - upl_size_t *upl_size, - upl_t *upl, - upl_page_info_array_t page_list, - unsigned int *count, - int *flags, - int force_data_sync) + vm_map_t map, + vm_map_offset_t map_offset, + upl_size_t *upl_size, + upl_t *upl, + upl_page_info_array_t page_list, + unsigned int *count, + upl_control_flags_t *flags, + vm_tag_t tag, + int force_data_sync) { - int map_flags; - kern_return_t kr; + upl_control_flags_t map_flags; + kern_return_t kr; - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } map_flags = *flags & ~UPL_NOZEROFILL; - if (force_data_sync) + if (force_data_sync) { map_flags |= UPL_FORCE_DATA_SYNC; + } kr = vm_map_create_upl(map, - map_offset, - upl_size, - upl, - page_list, - count, - &map_flags); + map_offset, + upl_size, + upl, + page_list, + count, + &map_flags, + tag); *flags = (map_flags & ~UPL_FORCE_DATA_SYNC); return kr; } +#if CONFIG_EMBEDDED +extern int proc_selfpid(void); +extern char *proc_name_address(void *p); +int cs_executable_mem_entry = 0; +int log_executable_mem_entry = 0; +#endif /* CONFIG_EMBEDDED */ + /* * mach_make_memory_entry_64 * @@ -1766,135 +2278,170 @@ vm_map_get_upl( * somewhere else. Rather than doing it all at once (and * without needing access to the other whole map). */ - kern_return_t mach_make_memory_entry_64( - vm_map_t target_map, - memory_object_size_t *size, + vm_map_t target_map, + memory_object_size_t *size, memory_object_offset_t offset, - vm_prot_t permission, - ipc_port_t *object_handle, - ipc_port_t parent_handle) + vm_prot_t permission, + ipc_port_t *object_handle, + ipc_port_t parent_handle) { - vm_map_version_t version; - vm_named_entry_t parent_entry; - vm_named_entry_t user_entry; - ipc_port_t user_handle; - kern_return_t kr; - vm_map_t real_map; + vm_named_entry_kernel_flags_t vmne_kflags; + + if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_USER) { + /* + * Unknown flag: reject for forward compatibility. 
+ */ + return KERN_INVALID_VALUE; + } + + vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE; + if (permission & MAP_MEM_LEDGER_TAGGED) { + vmne_kflags.vmnekf_ledger_tag = VM_LEDGER_TAG_DEFAULT; + } + return mach_make_memory_entry_internal(target_map, + size, + offset, + permission, + vmne_kflags, + object_handle, + parent_handle); +} + +kern_return_t +mach_make_memory_entry_internal( + vm_map_t target_map, + memory_object_size_t *size, + memory_object_offset_t offset, + vm_prot_t permission, + vm_named_entry_kernel_flags_t vmne_kflags, + ipc_port_t *object_handle, + ipc_port_t parent_handle) +{ + vm_map_version_t version; + vm_named_entry_t parent_entry; + vm_named_entry_t user_entry; + ipc_port_t user_handle; + kern_return_t kr; + vm_map_t real_map; /* needed for call to vm_map_lookup_locked */ - boolean_t wired; - vm_object_offset_t obj_off; - vm_prot_t prot; - struct vm_object_fault_info fault_info; - vm_object_t object; - vm_object_t shadow_object; + boolean_t wired; + boolean_t iskernel; + vm_object_offset_t obj_off; + vm_prot_t prot; + struct vm_object_fault_info fault_info = {}; + vm_object_t object; + vm_object_t shadow_object; /* needed for direct map entry manipulation */ - vm_map_entry_t map_entry; - vm_map_entry_t next_entry; - vm_map_t local_map; - vm_map_t original_map = target_map; - vm_map_size_t total_size; - vm_map_size_t map_size; - vm_map_offset_t map_offset; - vm_map_offset_t local_offset; - vm_object_size_t mappable_size; - - unsigned int access; - vm_prot_t protections; - unsigned int wimg_mode; - boolean_t cache_attr = FALSE; - - if (((permission & 0x00FF0000) & - ~(MAP_MEM_ONLY | - MAP_MEM_NAMED_CREATE | - MAP_MEM_PURGABLE | - MAP_MEM_NAMED_REUSE))) { + vm_map_entry_t map_entry; + vm_map_entry_t next_entry; + vm_map_t local_map; + vm_map_t original_map = target_map; + vm_map_size_t total_size, map_size; + vm_map_offset_t map_start, map_end; + vm_map_offset_t local_offset; + vm_object_size_t mappable_size; + + /* + * Stash the offset in the page for use by vm_map_enter_mem_object() + * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case. + */ + vm_object_offset_t offset_in_page; + + unsigned int access; + vm_prot_t protections; + vm_prot_t original_protections, mask_protections; + unsigned int wimg_mode; + + boolean_t force_shadow = FALSE; + boolean_t use_data_addr; + boolean_t use_4K_compat; +#if VM_NAMED_ENTRY_LIST + int alias = -1; +#endif /* VM_NAMED_ENTRY_LIST */ + + if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_ALL) { /* * Unknown flag: reject for forward compatibility. 
*/ return KERN_INVALID_VALUE; } - if (parent_handle != IP_NULL && + if (IP_VALID(parent_handle) && ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) { - parent_entry = (vm_named_entry_t) parent_handle->ip_kobject; + parent_entry = (vm_named_entry_t) ip_get_kobject(parent_handle); } else { parent_entry = NULL; } - protections = permission & VM_PROT_ALL; + if (parent_entry && parent_entry->is_copy) { + return KERN_INVALID_ARGUMENT; + } + + original_protections = permission & VM_PROT_ALL; + protections = original_protections; + mask_protections = permission & VM_PROT_IS_MASK; access = GET_MAP_MEM(permission); + use_data_addr = ((permission & MAP_MEM_USE_DATA_ADDR) != 0); + use_4K_compat = ((permission & MAP_MEM_4K_DATA_ADDR) != 0); user_handle = IP_NULL; user_entry = NULL; - map_offset = vm_map_trunc_page(offset); - map_size = vm_map_round_page(*size); + map_start = vm_map_trunc_page(offset, PAGE_MASK); if (permission & MAP_MEM_ONLY) { - boolean_t parent_is_object; + boolean_t parent_is_object; - if (parent_entry == NULL) { + map_end = vm_map_round_page(offset + *size, PAGE_MASK); + map_size = map_end - map_start; + + if (use_data_addr || use_4K_compat || parent_entry == NULL) { return KERN_INVALID_ARGUMENT; } - parent_is_object = !(parent_entry->is_sub_map || parent_entry->is_pager); + parent_is_object = !parent_entry->is_sub_map; object = parent_entry->backing.object; - if(parent_is_object && object != VM_OBJECT_NULL) + if (parent_is_object && object != VM_OBJECT_NULL) { wimg_mode = object->wimg_bits; - else - wimg_mode = VM_WIMG_DEFAULT; - if((access != GET_MAP_MEM(parent_entry->protection)) && - !(parent_entry->protection & VM_PROT_WRITE)) { + } else { + wimg_mode = VM_WIMG_USE_DEFAULT; + } + if ((access != GET_MAP_MEM(parent_entry->protection)) && + !(parent_entry->protection & VM_PROT_WRITE)) { return KERN_INVALID_RIGHT; } - if(access == MAP_MEM_IO) { - SET_MAP_MEM(access, parent_entry->protection); - wimg_mode = VM_WIMG_IO; - } else if (access == MAP_MEM_COPYBACK) { - SET_MAP_MEM(access, parent_entry->protection); - wimg_mode = VM_WIMG_DEFAULT; - } else if (access == MAP_MEM_WTHRU) { - SET_MAP_MEM(access, parent_entry->protection); - wimg_mode = VM_WIMG_WTHRU; - } else if (access == MAP_MEM_WCOMB) { - SET_MAP_MEM(access, parent_entry->protection); - wimg_mode = VM_WIMG_WCOMB; - } - if(parent_is_object && object && - (access != MAP_MEM_NOOP) && - (!(object->nophyscache))) { - if(object->wimg_bits != wimg_mode) { - vm_page_t p; - if ((wimg_mode == VM_WIMG_IO) - || (wimg_mode == VM_WIMG_WCOMB)) - cache_attr = TRUE; - else - cache_attr = FALSE; - vm_object_lock(object); - vm_object_paging_wait(object, THREAD_UNINT); - object->wimg_bits = wimg_mode; - queue_iterate(&object->memq, - p, vm_page_t, listq) { - if (!p->fictitious) { - if (p->pmapped) - pmap_disconnect(p->phys_page); - if (cache_attr) - pmap_sync_page_attributes_phys(p->phys_page); - } - } - vm_object_unlock(object); + vm_prot_to_wimg(access, &wimg_mode); + if (access != MAP_MEM_NOOP) { + SET_MAP_MEM(access, parent_entry->protection); + } + if (parent_is_object && object && + (access != MAP_MEM_NOOP) && + (!(object->nophyscache))) { + if (object->wimg_bits != wimg_mode) { + vm_object_lock(object); + vm_object_change_wimg_mode(object, wimg_mode); + vm_object_unlock(object); } } - if (object_handle) + if (object_handle) { *object_handle = IP_NULL; + } return KERN_SUCCESS; - } + } else if (permission & MAP_MEM_NAMED_CREATE) { + int ledger_flags = 0; + task_t owner; + + map_end = vm_map_round_page(offset + *size, PAGE_MASK); + map_size = 
map_end - map_start; + + if (use_data_addr || use_4K_compat) { + return KERN_INVALID_ARGUMENT; + } - if(permission & MAP_MEM_NAMED_CREATE) { kr = mach_memory_entry_allocate(&user_entry, &user_handle); if (kr != KERN_SUCCESS) { return KERN_FAILURE; @@ -1916,52 +2463,117 @@ mach_make_memory_entry_64( object = vm_object_allocate(map_size); assert(object != VM_OBJECT_NULL); - if (permission & MAP_MEM_PURGABLE) { - if (! (permission & VM_PROT_WRITE)) { - /* if we can't write, we can't purge */ + /* + * XXX + * We use this path when we want to make sure that + * nobody messes with the object (coalesce, for + * example) before we map it. + * We might want to use these objects for transposition via + * vm_object_transpose() too, so we don't want any copy or + * shadow objects either... + */ + object->copy_strategy = MEMORY_OBJECT_COPY_NONE; + object->true_share = TRUE; + + owner = current_task(); + if ((permission & MAP_MEM_PURGABLE) || + vmne_kflags.vmnekf_ledger_tag) { + assert(object->vo_owner == NULL); + assert(object->resident_page_count == 0); + assert(object->wired_page_count == 0); + assert(owner != TASK_NULL); + if (vmne_kflags.vmnekf_ledger_no_footprint) { + ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT; + object->vo_no_footprint = TRUE; + } + if (permission & MAP_MEM_PURGABLE) { + if (!(permission & VM_PROT_WRITE)) { + /* if we can't write, we can't purge */ + vm_object_deallocate(object); + kr = KERN_INVALID_ARGUMENT; + goto make_mem_done; + } + object->purgable = VM_PURGABLE_NONVOLATILE; + if (permission & MAP_MEM_PURGABLE_KERNEL_ONLY) { + object->purgeable_only_by_kernel = TRUE; + } +#if __arm64__ + if (owner->task_legacy_footprint) { + /* + * For ios11, we failed to account for + * this memory. Keep doing that for + * legacy apps (built before ios12), + * for backwards compatibility's sake... + */ + owner = kernel_task; + } +#endif /* __arm64__ */ + vm_object_lock(object); + vm_purgeable_nonvolatile_enqueue(object, owner); + vm_object_unlock(object); + } + } + + if (vmne_kflags.vmnekf_ledger_tag) { + /* + * Bill this object to the current task's + * ledgers for the given tag. + */ + if (vmne_kflags.vmnekf_ledger_no_footprint) { + ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT; + } + vm_object_lock(object); + object->vo_ledger_tag = vmne_kflags.vmnekf_ledger_tag; + kr = vm_object_ownership_change( + object, + vmne_kflags.vmnekf_ledger_tag, + owner, /* new owner */ + ledger_flags, + FALSE); /* task_objq locked? */ + vm_object_unlock(object); + if (kr != KERN_SUCCESS) { vm_object_deallocate(object); - kr = KERN_INVALID_ARGUMENT; goto make_mem_done; } - object->purgable = VM_PURGABLE_NONVOLATILE; } +#if CONFIG_SECLUDED_MEMORY + if (secluded_for_iokit && /* global boot-arg */ + ((permission & MAP_MEM_GRAB_SECLUDED) +#if 11 + /* XXX FBDP for my testing only */ + || (secluded_for_fbdp && map_size == 97550336) +#endif + )) { +#if 11 + if (!(permission & MAP_MEM_GRAB_SECLUDED) && + secluded_for_fbdp) { + printf("FBDP: object %p size %lld can grab secluded\n", object, (uint64_t) map_size); + } +#endif + object->can_grab_secluded = TRUE; + assert(!object->eligible_for_secluded); + } +#endif /* CONFIG_SECLUDED_MEMORY */ + /* * The VM object is brand new and nobody else knows about it, * so we don't need to lock it. 
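The MAP_MEM_NAMED_CREATE path above conjures a brand-new internal VM object, pins its copy strategy to MEMORY_OBJECT_COPY_NONE before anyone can map it, and optionally makes it purgeable (billed to the calling task) or ledger-tagged. A hedged user-space sketch of creating and mapping such an entry, assuming the MAP_MEM_* bits from <mach/memory_object_types.h>:

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/memory_object_types.h>
#include <unistd.h>

int
main(void)
{
    memory_object_size_t size = 4 * (memory_object_size_t)getpagesize();
    mach_port_t          entry = MACH_PORT_NULL;
    mach_vm_address_t    addr = 0;
    kern_return_t        kr;

    /* Fresh purgeable backing object; note MAP_MEM_PURGABLE requires
     * VM_PROT_WRITE ("if we can't write, we can't purge" above). */
    kr = mach_make_memory_entry_64(mach_task_self(), &size, 0,
        MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE |
        VM_PROT_READ | VM_PROT_WRITE,
        &entry, MACH_PORT_NULL);
    if (kr != KERN_SUCCESS) {
        return 1;
    }

    kr = mach_vm_map(mach_task_self(), &addr, size, 0, VM_FLAGS_ANYWHERE,
        entry, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
        VM_INHERIT_NONE);
    return (kr == KERN_SUCCESS) ? 0 : 1;
}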
*/ wimg_mode = object->wimg_bits; - if (access == MAP_MEM_IO) { - wimg_mode = VM_WIMG_IO; - } else if (access == MAP_MEM_COPYBACK) { - wimg_mode = VM_WIMG_DEFAULT; - } else if (access == MAP_MEM_WTHRU) { - wimg_mode = VM_WIMG_WTHRU; - } else if (access == MAP_MEM_WCOMB) { - wimg_mode = VM_WIMG_WCOMB; - } + vm_prot_to_wimg(access, &wimg_mode); if (access != MAP_MEM_NOOP) { object->wimg_bits = wimg_mode; } - /* the object has no pages, so no WIMG bits to update here */ - /* - * XXX - * We use this path when we want to make sure that - * nobody messes with the object (coalesce, for - * example) before we map it. - * We might want to use these objects for transposition via - * vm_object_transpose() too, so we don't want any copy or - * shadow objects either... - */ - object->copy_strategy = MEMORY_OBJECT_COPY_NONE; + /* the object has no pages, so no WIMG bits to update here */ user_entry->backing.object = object; user_entry->internal = TRUE; user_entry->is_sub_map = FALSE; - user_entry->is_pager = FALSE; user_entry->offset = 0; + user_entry->data_offset = 0; user_entry->protection = protections; SET_MAP_MEM(access, user_entry->protection); user_entry->size = map_size; @@ -1969,13 +2581,145 @@ mach_make_memory_entry_64( /* user_object pager and internal fields are not used */ /* when the object field is filled in. */ - *size = CAST_DOWN(vm_size_t, map_size); + *size = CAST_DOWN(vm_size_t, (user_entry->size - + user_entry->data_offset)); + *object_handle = user_handle; + return KERN_SUCCESS; + } + + if (permission & MAP_MEM_VM_COPY) { + vm_map_copy_t copy; + + if (target_map == VM_MAP_NULL) { + return KERN_INVALID_TASK; + } + + map_end = vm_map_round_page(offset + *size, PAGE_MASK); + map_size = map_end - map_start; + if (use_data_addr || use_4K_compat) { + offset_in_page = offset - map_start; + if (use_4K_compat) { + offset_in_page &= ~((signed)(0xFFF)); + } + } else { + offset_in_page = 0; + } + + kr = vm_map_copyin_internal(target_map, + map_start, + map_size, + VM_MAP_COPYIN_ENTRY_LIST, + ©); + if (kr != KERN_SUCCESS) { + return kr; + } + + kr = mach_memory_entry_allocate(&user_entry, &user_handle); + if (kr != KERN_SUCCESS) { + vm_map_copy_discard(copy); + return KERN_FAILURE; + } + + user_entry->backing.copy = copy; + user_entry->internal = FALSE; + user_entry->is_sub_map = FALSE; + user_entry->is_copy = TRUE; + user_entry->offset = 0; + user_entry->protection = protections; + user_entry->size = map_size; + user_entry->data_offset = offset_in_page; + + *size = CAST_DOWN(vm_size_t, (user_entry->size - + user_entry->data_offset)); + *object_handle = user_handle; + return KERN_SUCCESS; + } + + if (permission & MAP_MEM_VM_SHARE) { + vm_map_copy_t copy; + vm_prot_t cur_prot, max_prot; + + if (target_map == VM_MAP_NULL) { + return KERN_INVALID_TASK; + } + + map_end = vm_map_round_page(offset + *size, PAGE_MASK); + map_size = map_end - map_start; + if (use_data_addr || use_4K_compat) { + offset_in_page = offset - map_start; + if (use_4K_compat) { + offset_in_page &= ~((signed)(0xFFF)); + } + } else { + offset_in_page = 0; + } + + cur_prot = VM_PROT_ALL; + kr = vm_map_copy_extract(target_map, + map_start, + map_size, + ©, + &cur_prot, + &max_prot); + if (kr != KERN_SUCCESS) { + return kr; + } + + if (mask_protections) { + /* + * We just want as much of "original_protections" + * as we can get out of the actual "cur_prot". 
+ */ + protections &= cur_prot; + if (protections == VM_PROT_NONE) { + /* no access at all: fail */ + vm_map_copy_discard(copy); + return KERN_PROTECTION_FAILURE; + } + } else { + /* + * We want exactly "original_protections" + * out of "cur_prot". + */ + if ((cur_prot & protections) != protections) { + vm_map_copy_discard(copy); + return KERN_PROTECTION_FAILURE; + } + } + + kr = mach_memory_entry_allocate(&user_entry, &user_handle); + if (kr != KERN_SUCCESS) { + vm_map_copy_discard(copy); + return KERN_FAILURE; + } + + user_entry->backing.copy = copy; + user_entry->internal = FALSE; + user_entry->is_sub_map = FALSE; + user_entry->is_copy = TRUE; + user_entry->offset = 0; + user_entry->protection = protections; + user_entry->size = map_size; + user_entry->data_offset = offset_in_page; + + *size = CAST_DOWN(vm_size_t, (user_entry->size - + user_entry->data_offset)); *object_handle = user_handle; return KERN_SUCCESS; } if (parent_entry == NULL || (permission & MAP_MEM_NAMED_REUSE)) { + map_end = vm_map_round_page(offset + *size, PAGE_MASK); + map_size = map_end - map_start; + if (use_data_addr || use_4K_compat) { + offset_in_page = offset - map_start; + if (use_4K_compat) { + offset_in_page &= ~((signed)(0xFFF)); + } + } else { + offset_in_page = 0; + } /* Create a named object based on address range within the task map */ /* Go find the object at given address */ @@ -1985,31 +2729,87 @@ mach_make_memory_entry_64( } redo_lookup: + protections = original_protections; vm_map_lock_read(target_map); /* get the object associated with the target address */ /* note we check the permission of the range against */ /* that requested by the caller */ - kr = vm_map_lookup_locked(&target_map, map_offset, - protections, OBJECT_LOCK_EXCLUSIVE, &version, - &object, &obj_off, &prot, &wired, - &fault_info, - &real_map); + kr = vm_map_lookup_locked(&target_map, map_start, + protections | mask_protections, + OBJECT_LOCK_EXCLUSIVE, &version, + &object, &obj_off, &prot, &wired, + &fault_info, + &real_map); if (kr != KERN_SUCCESS) { vm_map_unlock_read(target_map); goto make_mem_done; } - if (((prot & protections) != protections) - || (object == kernel_object)) { + if (mask_protections) { + /* + * The caller asked us to use the "protections" as + * a mask, so restrict "protections" to what this + * mapping actually allows. + */ + protections &= prot; + } +#if CONFIG_EMBEDDED + /* + * Wiring would copy the pages to a shadow object. + * The shadow object would not be code-signed so + * attempting to execute code from these copied pages + * would trigger a code-signing violation. + */ + if (prot & VM_PROT_EXECUTE) { + if (log_executable_mem_entry) { + void *bsd_info; + bsd_info = current_task()->bsd_info; + printf("pid %d[%s] making memory entry out of " + "executable range from 0x%llx to 0x%llx:" + "might cause code-signing issues " + "later\n", + proc_selfpid(), + (bsd_info != NULL + ? proc_name_address(bsd_info) + : "?"), + (uint64_t) map_start, + (uint64_t) map_end); + } + DTRACE_VM2(cs_executable_mem_entry, + uint64_t, (uint64_t)map_start, + uint64_t, (uint64_t)map_end); + cs_executable_mem_entry++; + +#if 11 + /* + * We don't know how the memory entry will be used. + * It might never get wired and might not cause any + * trouble, so let's not reject this request... 
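Several of the paths above treat the caller's protections one of two ways, selected by VM_PROT_IS_MASK: either an exact requirement that must be fully available, or a ceiling to intersect with whatever the mapping actually allows. A small hypothetical helper (names invented) distilling that decision:

#include <mach/kern_return.h>
#include <mach/vm_prot.h>
#include <stdbool.h>

static kern_return_t
resolve_protections(vm_prot_t requested, bool is_mask,
    vm_prot_t available, vm_prot_t *granted)
{
    if (is_mask) {
        /* "requested" is an upper bound: take whatever overlaps. */
        *granted = requested & available;
        return (*granted == VM_PROT_NONE) ?
            KERN_PROTECTION_FAILURE : KERN_SUCCESS;
    }
    /* Exact semantics: everything requested must be available. */
    if ((available & requested) != requested) {
        return KERN_PROTECTION_FAILURE;
    }
    *granted = requested;
    return KERN_SUCCESS;
}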
+ */ +#else /* 11 */ + kr = KERN_PROTECTION_FAILURE; + vm_object_unlock(object); + vm_map_unlock_read(target_map); + if (real_map != target_map) { + vm_map_unlock_read(real_map); + } + goto make_mem_done; +#endif /* 11 */ + } +#endif /* CONFIG_EMBEDDED */ + + if (((prot & protections) != protections) + || (object == kernel_object)) { kr = KERN_INVALID_RIGHT; vm_object_unlock(object); vm_map_unlock_read(target_map); - if(real_map != target_map) + if (real_map != target_map) { vm_map_unlock_read(real_map); - if(object == kernel_object) { + } + if (object == kernel_object) { printf("Warning: Attempt to create a named" - " entry from the kernel_object\n"); + " entry from the kernel_object\n"); } goto make_mem_done; } @@ -2032,116 +2832,191 @@ redo_lookup: vm_object_unlock(object); local_map = original_map; - local_offset = map_offset; - if(target_map != local_map) { + local_offset = map_start; + if (target_map != local_map) { vm_map_unlock_read(target_map); - if(real_map != target_map) + if (real_map != target_map) { vm_map_unlock_read(real_map); + } vm_map_lock_read(local_map); target_map = local_map; real_map = local_map; } - while(TRUE) { - if(!vm_map_lookup_entry(local_map, - local_offset, &map_entry)) { - kr = KERN_INVALID_ARGUMENT; - vm_map_unlock_read(target_map); - if(real_map != target_map) - vm_map_unlock_read(real_map); - vm_object_deallocate(object); /* release extra ref */ - object = VM_OBJECT_NULL; - goto make_mem_done; - } - if(!(map_entry->is_sub_map)) { - if(map_entry->object.vm_object != object) { - kr = KERN_INVALID_ARGUMENT; - vm_map_unlock_read(target_map); - if(real_map != target_map) - vm_map_unlock_read(real_map); - vm_object_deallocate(object); /* release extra ref */ - object = VM_OBJECT_NULL; - goto make_mem_done; - } - break; - } else { - vm_map_t tmap; - tmap = local_map; - local_map = map_entry->object.sub_map; - - vm_map_lock_read(local_map); - vm_map_unlock_read(tmap); - target_map = local_map; - real_map = local_map; - local_offset = local_offset - map_entry->vme_start; - local_offset += map_entry->offset; - } + while (TRUE) { + if (!vm_map_lookup_entry(local_map, + local_offset, &map_entry)) { + kr = KERN_INVALID_ARGUMENT; + vm_map_unlock_read(target_map); + if (real_map != target_map) { + vm_map_unlock_read(real_map); + } + vm_object_deallocate(object); /* release extra ref */ + object = VM_OBJECT_NULL; + goto make_mem_done; + } + iskernel = (local_map->pmap == kernel_pmap); + if (!(map_entry->is_sub_map)) { + if (VME_OBJECT(map_entry) != object) { + kr = KERN_INVALID_ARGUMENT; + vm_map_unlock_read(target_map); + if (real_map != target_map) { + vm_map_unlock_read(real_map); + } + vm_object_deallocate(object); /* release extra ref */ + object = VM_OBJECT_NULL; + goto make_mem_done; + } + break; + } else { + vm_map_t tmap; + tmap = local_map; + local_map = VME_SUBMAP(map_entry); + + vm_map_lock_read(local_map); + vm_map_unlock_read(tmap); + target_map = local_map; + real_map = local_map; + local_offset = local_offset - map_entry->vme_start; + local_offset += VME_OFFSET(map_entry); + } } +#if VM_NAMED_ENTRY_LIST + alias = VME_ALIAS(map_entry); +#endif /* VM_NAMED_ENTRY_LIST */ + /* * We found the VM map entry, lock the VM object again. */ vm_object_lock(object); - if(map_entry->wired_count) { - /* JMM - The check below should be reworked instead. 
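The while loop above chases an address down through nested submaps, rebasing the offset at each level: subtract the entry's start, add the entry's offset into the submap, and repeat until a terminal (object-backed) entry is found. A toy model of that descent, with invented types:

#include <stddef.h>
#include <stdint.h>

struct toy_map;

struct toy_entry {
    uint64_t        vme_start, vme_end; /* range in the enclosing map   */
    uint64_t        offset;             /* offset into object or submap */
    struct toy_map *submap;             /* non-NULL: descend            */
};

struct toy_map {
    struct toy_entry *(*lookup)(struct toy_map *m, uint64_t addr);
};

/* Rebase "addr" level by level until a terminal (non-submap) entry. */
static struct toy_entry *
descend(struct toy_map *map, uint64_t addr, uint64_t *obj_offset)
{
    for (;;) {
        struct toy_entry *e = map->lookup(map, addr);
        if (e == NULL) {
            return NULL;
        }
        if (e->submap == NULL) {
            *obj_offset = (addr - e->vme_start) + e->offset;
            return e;
        }
        /* Same rebasing as above: entry-relative, plus submap offset. */
        addr = (addr - e->vme_start) + e->offset;
        map = e->submap;
    }
}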
*/ - object->true_share = TRUE; - } - if(((map_entry->max_protection) & protections) != protections) { - kr = KERN_INVALID_RIGHT; - vm_object_unlock(object); - vm_map_unlock_read(target_map); - if(real_map != target_map) + if (map_entry->wired_count) { + /* JMM - The check below should be reworked instead. */ + object->true_share = TRUE; + } + if (mask_protections) { + /* + * The caller asked us to use the "protections" as + * a mask, so restrict "protections" to what this + * mapping actually allows. + */ + protections &= map_entry->max_protection; + } + if (((map_entry->max_protection) & protections) != protections) { + kr = KERN_INVALID_RIGHT; + vm_object_unlock(object); + vm_map_unlock_read(target_map); + if (real_map != target_map) { vm_map_unlock_read(real_map); - vm_object_deallocate(object); - object = VM_OBJECT_NULL; - goto make_mem_done; + } + vm_object_deallocate(object); + object = VM_OBJECT_NULL; + goto make_mem_done; } mappable_size = fault_info.hi_offset - obj_off; total_size = map_entry->vme_end - map_entry->vme_start; - if(map_size > mappable_size) { + if (map_size > mappable_size) { /* try to extend mappable size if the entries */ /* following are from the same object and are */ /* compatible */ next_entry = map_entry->vme_next; /* lets see if the next map entry is still */ /* pointing at this object and is contiguous */ - while(map_size > mappable_size) { - if((next_entry->object.vm_object == object) && - (next_entry->vme_start == - next_entry->vme_prev->vme_end) && - (next_entry->offset == - next_entry->vme_prev->offset + - (next_entry->vme_prev->vme_end - - next_entry->vme_prev->vme_start))) { - if(((next_entry->max_protection) - & protections) != protections) { - break; + while (map_size > mappable_size) { + if ((VME_OBJECT(next_entry) == object) && + (next_entry->vme_start == + next_entry->vme_prev->vme_end) && + (VME_OFFSET(next_entry) == + (VME_OFFSET(next_entry->vme_prev) + + (next_entry->vme_prev->vme_end - + next_entry->vme_prev->vme_start)))) { + if (mask_protections) { + /* + * The caller asked us to use + * the "protections" as a mask, + * so restrict "protections" to + * what this mapping actually + * allows. + */ + protections &= next_entry->max_protection; + } + if ((next_entry->wired_count) && + (map_entry->wired_count == 0)) { + break; + } + if (((next_entry->max_protection) + & protections) != protections) { + break; } if (next_entry->needs_copy != - map_entry->needs_copy) + map_entry->needs_copy) { break; + } mappable_size += next_entry->vme_end - - next_entry->vme_start; + - next_entry->vme_start; total_size += next_entry->vme_end - - next_entry->vme_start; + - next_entry->vme_start; next_entry = next_entry->vme_next; } else { break; } - } } - if(object->internal) { - /* vm_map_lookup_locked will create a shadow if */ - /* needs_copy is set but does not check for the */ - /* other two conditions shown. It is important to */ + /* vm_map_entry_should_cow_for_true_share() checks for malloc tags, + * never true in kernel */ + if (!iskernel && vm_map_entry_should_cow_for_true_share(map_entry) && + object->vo_size > map_size && + map_size != 0) { + /* + * Set up the targeted range for copy-on-write to + * limit the impact of "true_share"/"copy_delay" to + * that range instead of the entire VM object... 
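When the named entry is larger than the first map entry, the loop above tries to extend across following entries, but only while they reference the same object, abut in address space, continue the object offset exactly, and agree on wiring, protection, and needs_copy. A simplified, hypothetical predicate for that test:

#include <stdbool.h>
#include <stdint.h>

struct toy_vme {
    uint64_t    start, end;     /* address range          */
    uint64_t    offset;         /* offset into the object */
    const void *object;
    unsigned    wired_count;
    int         max_protection;
    bool        needs_copy;
};

static bool
can_extend(const struct toy_vme *first, const struct toy_vme *prev,
    const struct toy_vme *next, int protections)
{
    return next->object == first->object &&
        next->start == prev->end &&                /* abuts in VA     */
        next->offset == prev->offset +
        (prev->end - prev->start) &&               /* abuts in object */
        !(next->wired_count && first->wired_count == 0) &&
        (next->max_protection & protections) == protections &&
        next->needs_copy == first->needs_copy;
}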
+ */ + + vm_object_unlock(object); + if (vm_map_lock_read_to_write(target_map)) { + vm_object_deallocate(object); + target_map = original_map; + goto redo_lookup; + } + + vm_map_clip_start(target_map, + map_entry, + vm_map_trunc_page(map_start, + VM_MAP_PAGE_MASK(target_map))); + vm_map_clip_end(target_map, + map_entry, + (vm_map_round_page(map_end, + VM_MAP_PAGE_MASK(target_map)))); + force_shadow = TRUE; + + if ((map_entry->vme_end - offset) < map_size) { + map_size = map_entry->vme_end - map_start; + } + total_size = map_entry->vme_end - map_entry->vme_start; + + vm_map_lock_write_to_read(target_map); + vm_object_lock(object); + } + + if (object->internal) { + /* vm_map_lookup_locked will create a shadow if */ + /* needs_copy is set but does not check for the */ + /* other two conditions shown. It is important to */ /* set up an object which will not be pulled from */ /* under us. */ - if ((map_entry->needs_copy || object->shadowed || - (object->size > total_size)) - && !object->true_share) { + if (force_shadow || + ((map_entry->needs_copy || + object->shadowed || + (object->vo_size > total_size && + (VME_OFFSET(map_entry) != 0 || + object->vo_size > + vm_map_round_page(total_size, + VM_MAP_PAGE_MASK(target_map))))) + && !object->true_share + && object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)) { /* * We have to unlock the VM object before * trying to upgrade the VM map lock, to @@ -2155,7 +3030,7 @@ redo_lookup: */ vm_object_unlock(object); - if (vm_map_lock_read_to_write(target_map)) { + if (vm_map_lock_read_to_write(target_map)) { /* * We couldn't upgrade our VM map lock * from "read" to "write" and we lost @@ -2164,61 +3039,72 @@ redo_lookup: */ vm_object_deallocate(object); /* extra ref */ target_map = original_map; - goto redo_lookup; - } + goto redo_lookup; + } +#if 00 vm_object_lock(object); +#endif - /* + /* * JMM - We need to avoid coming here when the object * is wired by anybody, not just the current map. Why * couldn't we use the standard vm_object_copy_quickly() * approach here? */ - - /* create a shadow object */ - vm_object_shadow(&map_entry->object.vm_object, - &map_entry->offset, total_size); - shadow_object = map_entry->object.vm_object; + + /* create a shadow object */ + VME_OBJECT_SHADOW(map_entry, total_size); + shadow_object = VME_OBJECT(map_entry); +#if 00 vm_object_unlock(object); +#endif prot = map_entry->protection & ~VM_PROT_WRITE; - if (override_nx(target_map, map_entry->alias) && prot) - prot |= VM_PROT_EXECUTE; + if (override_nx(target_map, + VME_ALIAS(map_entry)) + && prot) { + prot |= VM_PROT_EXECUTE; + } vm_object_pmap_protect( - object, map_entry->offset, + object, VME_OFFSET(map_entry), total_size, - ((map_entry->is_shared - || target_map->mapped) - ? PMAP_NULL : - target_map->pmap), + ((map_entry->is_shared + || target_map->mapped_in_other_pmaps) + ? 
PMAP_NULL : + target_map->pmap), map_entry->vme_start, prot); - total_size -= (map_entry->vme_end - - map_entry->vme_start); + total_size -= (map_entry->vme_end + - map_entry->vme_start); next_entry = map_entry->vme_next; map_entry->needs_copy = FALSE; vm_object_lock(shadow_object); while (total_size) { - if(next_entry->object.vm_object == object) { - vm_object_reference_locked(shadow_object); - next_entry->object.vm_object - = shadow_object; - vm_object_deallocate(object); - next_entry->offset - = next_entry->vme_prev->offset + - (next_entry->vme_prev->vme_end - - next_entry->vme_prev->vme_start); + assert((next_entry->wired_count == 0) || + (map_entry->wired_count)); + + if (VME_OBJECT(next_entry) == object) { + vm_object_reference_locked(shadow_object); + VME_OBJECT_SET(next_entry, + shadow_object); + vm_object_deallocate(object); + VME_OFFSET_SET( + next_entry, + (VME_OFFSET(next_entry->vme_prev) + + (next_entry->vme_prev->vme_end + - next_entry->vme_prev->vme_start))); + next_entry->use_pmap = TRUE; next_entry->needs_copy = FALSE; } else { panic("mach_make_memory_entry_64:" - " map entries out of sync\n"); + " map entries out of sync\n"); } - total_size -= - next_entry->vme_end - - next_entry->vme_start; + total_size -= + next_entry->vme_end + - next_entry->vme_start; next_entry = next_entry->vme_next; } @@ -2230,12 +3116,12 @@ redo_lookup: vm_object_deallocate(object); /* extra ref */ object = shadow_object; - obj_off = (local_offset - map_entry->vme_start) - + map_entry->offset; + obj_off = ((local_offset - map_entry->vme_start) + + VME_OFFSET(map_entry)); vm_map_lock_write_to_read(target_map); - } - } + } + } /* note: in the future we can (if necessary) allow for */ /* memory object lists, this will better support */ @@ -2248,21 +3134,31 @@ redo_lookup: /* against delayed copy, etc. is mostly defensive. 
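The shadow setup above finishes by calling vm_object_pmap_protect() with write access stripped, so existing mappings take a fault on their next store and copy pages into the shadow. In user space the closest analogue is downgrading a mapping with mach_vm_protect(); a sketch (analogy only, the kernel path operates on the pmap directly):

#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
make_readonly_until_fault(mach_vm_address_t addr, mach_vm_size_t size)
{
    /* Next store to [addr, addr + size) faults, as after the
     * pmap_protect() above. */
    return mach_vm_protect(mach_task_self(), addr, size,
        FALSE /* set_maximum */, VM_PROT_READ);
}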
*/ wimg_mode = object->wimg_bits; - if(!(object->nophyscache)) { - if(access == MAP_MEM_IO) { - wimg_mode = VM_WIMG_IO; - } else if (access == MAP_MEM_COPYBACK) { - wimg_mode = VM_WIMG_USE_DEFAULT; - } else if (access == MAP_MEM_WTHRU) { - wimg_mode = VM_WIMG_WTHRU; - } else if (access == MAP_MEM_WCOMB) { - wimg_mode = VM_WIMG_WCOMB; - } + if (!(object->nophyscache)) { + vm_prot_to_wimg(access, &wimg_mode); } +#if VM_OBJECT_TRACKING_OP_TRUESHARE + if (!object->true_share && + vm_object_tracking_inited) { + void *bt[VM_OBJECT_TRACKING_BTDEPTH]; + int num = 0; + + num = OSBacktrace(bt, + VM_OBJECT_TRACKING_BTDEPTH); + btlog_add_entry(vm_object_tracking_btlog, + object, + VM_OBJECT_TRACKING_OP_TRUESHARE, + bt, + num); + } +#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ + + vm_object_lock_assert_exclusive(object); object->true_share = TRUE; - if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) + if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; + } /* * The memory entry now points to this VM object and we @@ -2272,30 +3168,12 @@ redo_lookup: */ vm_map_unlock_read(target_map); - if(real_map != target_map) + if (real_map != target_map) { vm_map_unlock_read(real_map); + } - if(object->wimg_bits != wimg_mode) { - vm_page_t p; - - vm_object_paging_wait(object, THREAD_UNINT); - - if ((wimg_mode == VM_WIMG_IO) - || (wimg_mode == VM_WIMG_WCOMB)) - cache_attr = TRUE; - else - cache_attr = FALSE; - - queue_iterate(&object->memq, - p, vm_page_t, listq) { - if (!p->fictitious) { - if (p->pmapped) - pmap_disconnect(p->phys_page); - if (cache_attr) - pmap_sync_page_attributes_phys(p->phys_page); - } - } - object->wimg_bits = wimg_mode; + if (object->wimg_bits != wimg_mode) { + vm_object_change_wimg_mode(object, wimg_mode); } /* the size of mapped entry that overlaps with our region */ @@ -2304,8 +3182,9 @@ redo_lookup: /* offset of our beg addr within entry */ /* it corresponds to this: */ - if(map_size > mappable_size) + if (map_size > mappable_size) { map_size = mappable_size; + } if (permission & MAP_MEM_NAMED_REUSE) { /* @@ -2317,10 +3196,13 @@ redo_lookup: parent_entry->backing.object == object && parent_entry->internal == object->internal && parent_entry->is_sub_map == FALSE && - parent_entry->is_pager == FALSE && parent_entry->offset == obj_off && parent_entry->protection == protections && - parent_entry->size == map_size) { + parent_entry->size == map_size && + ((!(use_data_addr || use_4K_compat) && + (parent_entry->data_offset == 0)) || + ((use_data_addr || use_4K_compat) && + (parent_entry->data_offset == offset_in_page)))) { /* * We have a match: re-use "parent_entry". */ @@ -2330,6 +3212,10 @@ redo_lookup: /* parent_entry->ref_count++; XXX ? 
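The MAP_MEM_NAMED_REUSE comparison above (same object, offset, size, protection, and now data_offset) returns an extra send right on the parent handle instead of minting a new entry. The call shape from user space, hedged:

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/memory_object_types.h>

/* If the new request resolves to the identical entry, "*reused" comes
 * back as an extra send right on "parent" rather than a fresh entry. */
static kern_return_t
reuse_entry(mach_vm_address_t addr, memory_object_size_t *size,
    mach_port_t parent, mach_port_t *reused)
{
    return mach_make_memory_entry_64(mach_task_self(), size, addr,
        MAP_MEM_NAMED_REUSE | VM_PROT_READ | VM_PROT_WRITE,
        reused, parent);
}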
*/ /* Get an extra send-right on handle */ ipc_port_copy_send(parent_handle); + + *size = CAST_DOWN(vm_size_t, + (parent_entry->size - + parent_entry->data_offset)); *object_handle = parent_handle; return KERN_SUCCESS; } else { @@ -2351,31 +3237,73 @@ redo_lookup: user_entry->backing.object = object; user_entry->internal = object->internal; user_entry->is_sub_map = FALSE; - user_entry->is_pager = FALSE; user_entry->offset = obj_off; - user_entry->protection = permission; + user_entry->data_offset = offset_in_page; + user_entry->protection = protections; + SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection); user_entry->size = map_size; +#if VM_NAMED_ENTRY_LIST + user_entry->named_entry_alias = alias; +#endif /* VM_NAMED_ENTRY_LIST */ /* user_object pager and internal fields are not used */ /* when the object field is filled in. */ - *size = CAST_DOWN(vm_size_t, map_size); + *size = CAST_DOWN(vm_size_t, (user_entry->size - + user_entry->data_offset)); *object_handle = user_handle; return KERN_SUCCESS; - } else { /* The new object will be base on an existing named object */ - if (parent_entry == NULL) { kr = KERN_INVALID_ARGUMENT; goto make_mem_done; } - if((offset + map_size) > parent_entry->size) { - kr = KERN_INVALID_ARGUMENT; - goto make_mem_done; + + if (use_data_addr || use_4K_compat) { + /* + * submaps and pagers should only be accessible from within + * the kernel, which shouldn't use the data address flag, so can fail here. + */ + if (parent_entry->is_sub_map) { + panic("Shouldn't be using data address with a parent entry that is a submap."); + } + /* + * Account for offset to data in parent entry and + * compute our own offset to data. + */ + if ((offset + *size + parent_entry->data_offset) > parent_entry->size) { + kr = KERN_INVALID_ARGUMENT; + goto make_mem_done; + } + + map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK); + offset_in_page = (offset + parent_entry->data_offset) - map_start; + if (use_4K_compat) { + offset_in_page &= ~((signed)(0xFFF)); + } + map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK); + map_size = map_end - map_start; + } else { + map_end = vm_map_round_page(offset + *size, PAGE_MASK); + map_size = map_end - map_start; + offset_in_page = 0; + + if ((offset + map_size) > parent_entry->size) { + kr = KERN_INVALID_ARGUMENT; + goto make_mem_done; + } } - if((protections & parent_entry->protection) != protections) { + if (mask_protections) { + /* + * The caller asked us to use the "protections" as + * a mask, so restrict "protections" to what this + * mapping actually allows. 
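The remaining branch derives a new entry from an existing parent entry rather than from a task's map: the offset is interpreted relative to the parent (with data_offset folded in for the MAP_MEM_USE_DATA_ADDR case) and the requested protections must be a subset of the parent's. A hedged sketch of that call shape (hypothetical helper):

#include <mach/mach.h>
#include <mach/mach_vm.h>

/* "offset" is relative to the parent entry, not to any task address. */
static kern_return_t
carve_subentry(mach_port_t parent, memory_object_offset_t offset,
    memory_object_size_t *size, mach_port_t *child)
{
    return mach_make_memory_entry_64(mach_task_self(), size, offset,
        VM_PROT_READ, child, parent);
}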
+ */ + protections &= parent_entry->protection; + } + if ((protections & parent_entry->protection) != protections) { kr = KERN_PROTECTION_FAILURE; goto make_mem_done; } @@ -2387,38 +3315,54 @@ redo_lookup: } user_entry->size = map_size; - user_entry->offset = parent_entry->offset + map_offset; + user_entry->offset = parent_entry->offset + map_start; + user_entry->data_offset = offset_in_page; user_entry->is_sub_map = parent_entry->is_sub_map; - user_entry->is_pager = parent_entry->is_pager; + user_entry->is_copy = parent_entry->is_copy; user_entry->internal = parent_entry->internal; user_entry->protection = protections; - if(access != MAP_MEM_NOOP) { - SET_MAP_MEM(access, user_entry->protection); + if (access != MAP_MEM_NOOP) { + SET_MAP_MEM(access, user_entry->protection); } - if(parent_entry->is_sub_map) { - user_entry->backing.map = parent_entry->backing.map; - vm_map_lock(user_entry->backing.map); - user_entry->backing.map->ref_count++; - vm_map_unlock(user_entry->backing.map); - } - else if (parent_entry->is_pager) { - user_entry->backing.pager = parent_entry->backing.pager; - /* JMM - don't we need a reference here? */ + if (parent_entry->is_sub_map) { + vm_map_t map = parent_entry->backing.map; + user_entry->backing.map = map; + lck_mtx_lock(&map->s_lock); + os_ref_retain_locked(&map->map_refcnt); + lck_mtx_unlock(&map->s_lock); } else { - object = parent_entry->backing.object; - assert(object != VM_OBJECT_NULL); - user_entry->backing.object = object; - /* we now point to this object, hold on */ - vm_object_reference(object); - vm_object_lock(object); - object->true_share = TRUE; - if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) - object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; - vm_object_unlock(object); + object = parent_entry->backing.object; + assert(object != VM_OBJECT_NULL); + user_entry->backing.object = object; + /* we now point to this object, hold on */ + vm_object_lock(object); + vm_object_reference_locked(object); +#if VM_OBJECT_TRACKING_OP_TRUESHARE + if (!object->true_share && + vm_object_tracking_inited) { + void *bt[VM_OBJECT_TRACKING_BTDEPTH]; + int num = 0; + + num = OSBacktrace(bt, + VM_OBJECT_TRACKING_BTDEPTH); + btlog_add_entry(vm_object_tracking_btlog, + object, + VM_OBJECT_TRACKING_OP_TRUESHARE, + bt, + num); + } +#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ + + object->true_share = TRUE; + if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { + object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; + } + vm_object_unlock(object); } - *size = CAST_DOWN(vm_size_t, map_size); + *size = CAST_DOWN(vm_size_t, (user_entry->size - + user_entry->data_offset)); *object_handle = user_handle; return KERN_SUCCESS; } @@ -2437,40 +3381,40 @@ make_mem_done: kern_return_t _mach_make_memory_entry( - vm_map_t target_map, - memory_object_size_t *size, - memory_object_offset_t offset, - vm_prot_t permission, - ipc_port_t *object_handle, - ipc_port_t parent_entry) -{ - memory_object_size_t mo_size; - kern_return_t kr; - + vm_map_t target_map, + memory_object_size_t *size, + memory_object_offset_t offset, + vm_prot_t permission, + ipc_port_t *object_handle, + ipc_port_t parent_entry) +{ + memory_object_size_t mo_size; + kern_return_t kr; + mo_size = (memory_object_size_t)*size; - kr = mach_make_memory_entry_64(target_map, &mo_size, - (memory_object_offset_t)offset, permission, object_handle, - parent_entry); + kr = mach_make_memory_entry_64(target_map, &mo_size, + (memory_object_offset_t)offset, permission, object_handle, + parent_entry); *size = mo_size; return kr; 
} kern_return_t mach_make_memory_entry( - vm_map_t target_map, - vm_size_t *size, - vm_offset_t offset, - vm_prot_t permission, - ipc_port_t *object_handle, - ipc_port_t parent_entry) -{ - memory_object_size_t mo_size; - kern_return_t kr; - + vm_map_t target_map, + vm_size_t *size, + vm_offset_t offset, + vm_prot_t permission, + ipc_port_t *object_handle, + ipc_port_t parent_entry) +{ + memory_object_size_t mo_size; + kern_return_t kr; + mo_size = (memory_object_size_t)*size; - kr = mach_make_memory_entry_64(target_map, &mo_size, - (memory_object_offset_t)offset, permission, object_handle, - parent_entry); + kr = mach_make_memory_entry_64(target_map, &mo_size, + (memory_object_offset_t)offset, permission, object_handle, + parent_entry); *size = CAST_DOWN(vm_size_t, mo_size); return kr; } @@ -2484,74 +3428,105 @@ mach_make_memory_entry( * this routine is done with the vm_wire interface. */ kern_return_t -task_wire( - vm_map_t map, - boolean_t must_wire) +task_wire( + vm_map_t map, + boolean_t must_wire) +{ + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } + + vm_map_lock(map); + map->wiring_required = (must_wire == TRUE); + vm_map_unlock(map); + + return KERN_SUCCESS; +} + +kern_return_t +vm_map_exec_lockdown( + vm_map_t map) { - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } - if (must_wire) - map->wiring_required = TRUE; - else - map->wiring_required = FALSE; + vm_map_lock(map); + map->map_disallow_new_exec = TRUE; + vm_map_unlock(map); + + return KERN_SUCCESS; +} + +#if VM_NAMED_ENTRY_LIST +queue_head_t vm_named_entry_list; +int vm_named_entry_count = 0; +lck_mtx_t vm_named_entry_list_lock_data; +lck_mtx_ext_t vm_named_entry_list_lock_data_ext; +#endif /* VM_NAMED_ENTRY_LIST */ - return(KERN_SUCCESS); +void vm_named_entry_init(void); +void +vm_named_entry_init(void) +{ +#if VM_NAMED_ENTRY_LIST + queue_init(&vm_named_entry_list); + vm_named_entry_count = 0; + lck_mtx_init_ext(&vm_named_entry_list_lock_data, + &vm_named_entry_list_lock_data_ext, + &vm_object_lck_grp, + &vm_object_lck_attr); +#endif /* VM_NAMED_ENTRY_LIST */ } __private_extern__ kern_return_t mach_memory_entry_allocate( - vm_named_entry_t *user_entry_p, - ipc_port_t *user_handle_p) + vm_named_entry_t *user_entry_p, + ipc_port_t *user_handle_p) { - vm_named_entry_t user_entry; - ipc_port_t user_handle; - ipc_port_t previous; + vm_named_entry_t user_entry; + ipc_port_t user_handle; user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry); - if (user_entry == NULL) - return KERN_FAILURE; - - named_entry_lock_init(user_entry); - - user_handle = ipc_port_alloc_kernel(); - if (user_handle == IP_NULL) { - kfree(user_entry, sizeof *user_entry); + if (user_entry == NULL) { return KERN_FAILURE; } - ip_lock(user_handle); + bzero(user_entry, sizeof(*user_entry)); - /* make a sonce right */ - user_handle->ip_sorights++; - ip_reference(user_handle); - - user_handle->ip_destination = IP_NULL; - user_handle->ip_receiver_name = MACH_PORT_NULL; - user_handle->ip_receiver = ipc_space_kernel; - - /* make a send right */ - user_handle->ip_mscount++; - user_handle->ip_srights++; - ip_reference(user_handle); - - ipc_port_nsrequest(user_handle, 1, user_handle, &previous); - /* nsrequest unlocks user_handle */ + named_entry_lock_init(user_entry); - user_entry->backing.pager = NULL; + user_entry->backing.object = NULL; user_entry->is_sub_map = FALSE; - user_entry->is_pager = FALSE; + user_entry->is_copy = FALSE; user_entry->internal = FALSE; user_entry->size = 
0; user_entry->offset = 0; + user_entry->data_offset = 0; user_entry->protection = VM_PROT_NONE; user_entry->ref_count = 1; - ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry, - IKOT_NAMED_ENTRY); + user_handle = ipc_kobject_alloc_port((ipc_kobject_t)user_entry, + IKOT_NAMED_ENTRY, + IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST); *user_entry_p = user_entry; *user_handle_p = user_handle; +#if VM_NAMED_ENTRY_LIST + /* keep a loose (no reference) pointer to the Mach port, for debugging only */ + user_entry->named_entry_port = user_handle; + /* backtrace at allocation time, for debugging only */ + OSBacktrace(&user_entry->named_entry_bt[0], + NAMED_ENTRY_BT_DEPTH); + + /* add this new named entry to the global list */ + lck_mtx_lock_spin(&vm_named_entry_list_lock_data); + queue_enter(&vm_named_entry_list, user_entry, + vm_named_entry_t, named_entry_list); + vm_named_entry_count++; + lck_mtx_unlock(&vm_named_entry_list_lock_data); +#endif /* VM_NAMED_ENTRY_LIST */ + return KERN_SUCCESS; } @@ -2560,87 +3535,123 @@ mach_memory_entry_allocate( * * Create a named entry backed by the provided pager. * - * JMM - we need to hold a reference on the pager - - * and release it when the named entry is destroyed. */ kern_return_t mach_memory_object_memory_entry_64( - host_t host, - boolean_t internal, - vm_object_offset_t size, - vm_prot_t permission, - memory_object_t pager, - ipc_port_t *entry_handle) + host_t host, + boolean_t internal, + vm_object_offset_t size, + vm_prot_t permission, + memory_object_t pager, + ipc_port_t *entry_handle) { - unsigned int access; - vm_named_entry_t user_entry; - ipc_port_t user_handle; + unsigned int access; + vm_named_entry_t user_entry; + ipc_port_t user_handle; + vm_object_t object; + + if (host == HOST_NULL) { + return KERN_INVALID_HOST; + } - if (host == HOST_NULL) - return(KERN_INVALID_HOST); + if (pager == MEMORY_OBJECT_NULL && internal) { + object = vm_object_allocate(size); + if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { + object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; + } + } else { + object = memory_object_to_vm_object(pager); + if (object != VM_OBJECT_NULL) { + vm_object_reference(object); + } + } + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_ARGUMENT; + } if (mach_memory_entry_allocate(&user_entry, &user_handle) != KERN_SUCCESS) { + vm_object_deallocate(object); return KERN_FAILURE; } - user_entry->backing.pager = pager; user_entry->size = size; user_entry->offset = 0; user_entry->protection = permission & VM_PROT_ALL; access = GET_MAP_MEM(permission); SET_MAP_MEM(access, user_entry->protection); - user_entry->internal = internal; user_entry->is_sub_map = FALSE; - user_entry->is_pager = TRUE; assert(user_entry->ref_count == 1); + user_entry->backing.object = object; + user_entry->internal = object->internal; + assert(object->internal == internal); + *entry_handle = user_handle; return KERN_SUCCESS; -} +} kern_return_t mach_memory_object_memory_entry( - host_t host, - boolean_t internal, - vm_size_t size, - vm_prot_t permission, - memory_object_t pager, - ipc_port_t *entry_handle) + host_t host, + boolean_t internal, + vm_size_t size, + vm_prot_t permission, + memory_object_t pager, + ipc_port_t *entry_handle) { - return mach_memory_object_memory_entry_64( host, internal, - (vm_object_offset_t)size, permission, pager, entry_handle); + return mach_memory_object_memory_entry_64( host, internal, + (vm_object_offset_t)size, permission, pager, entry_handle); } kern_return_t mach_memory_entry_purgable_control( - 
ipc_port_t entry_port, - vm_purgable_t control, - int *state) + ipc_port_t entry_port, + vm_purgable_t control, + int *state) +{ + if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) { + /* not allowed from user-space */ + return KERN_INVALID_ARGUMENT; + } + + return memory_entry_purgeable_control_internal(entry_port, control, state); +} + +kern_return_t +memory_entry_purgeable_control_internal( + ipc_port_t entry_port, + vm_purgable_t control, + int *state) { - kern_return_t kr; - vm_named_entry_t mem_entry; - vm_object_t object; + kern_return_t kr; + vm_named_entry_t mem_entry; + vm_object_t object; - if (entry_port == IP_NULL || + if (!IP_VALID(entry_port) || ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { return KERN_INVALID_ARGUMENT; } if (control != VM_PURGABLE_SET_STATE && - control != VM_PURGABLE_GET_STATE) - return(KERN_INVALID_ARGUMENT); + control != VM_PURGABLE_GET_STATE && + control != VM_PURGABLE_SET_STATE_FROM_KERNEL) { + return KERN_INVALID_ARGUMENT; + } - if (control == VM_PURGABLE_SET_STATE && + if ((control == VM_PURGABLE_SET_STATE || + control == VM_PURGABLE_SET_STATE_FROM_KERNEL) && (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) || - ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) - return(KERN_INVALID_ARGUMENT); + ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) { + return KERN_INVALID_ARGUMENT; + } - mem_entry = (vm_named_entry_t) entry_port->ip_kobject; + mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port); named_entry_lock(mem_entry); - if (mem_entry->is_sub_map || mem_entry->is_pager) { + if (mem_entry->is_sub_map || + mem_entry->is_copy) { named_entry_unlock(mem_entry); return KERN_INVALID_ARGUMENT; } @@ -2654,7 +3665,7 @@ mach_memory_entry_purgable_control( vm_object_lock(object); /* check that named entry covers entire object ? 
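mach_memory_entry_purgable_control() now rejects VM_PURGABLE_SET_STATE_FROM_KERNEL from user space and funnels into an internal variant. A hedged user-space round trip, assuming the MIG stub is exported by the SDK and the VM_PURGABLE_* constants from <mach/vm_purgable.h> (the entry must have been created purgeable, as in the MAP_MEM_PURGABLE sketch earlier):

#include <mach/mach.h>
#include <mach/vm_purgable.h>

/* Volunteer the pages, then ask whether the kernel has already emptied
 * them. Returns 1 if emptied, 0 if intact, -1 on error. */
static int
purge_cycle(mach_port_t entry)
{
    int state = VM_PURGABLE_VOLATILE;

    if (mach_memory_entry_purgable_control(entry,
        VM_PURGABLE_SET_STATE, &state) != KERN_SUCCESS) {
        return -1;
    }
    state = 0;
    if (mach_memory_entry_purgable_control(entry,
        VM_PURGABLE_GET_STATE, &state) != KERN_SUCCESS) {
        return -1;
    }
    return (state & VM_PURGABLE_STATE_MASK) == VM_PURGABLE_EMPTY;
}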
*/ - if (mem_entry->offset != 0 || object->size != mem_entry->size) { + if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) { vm_object_unlock(object); named_entry_unlock(mem_entry); return KERN_INVALID_ARGUMENT; @@ -2669,6 +3680,198 @@ mach_memory_entry_purgable_control( return kr; } +kern_return_t +mach_memory_entry_access_tracking( + ipc_port_t entry_port, + int *access_tracking, + uint32_t *access_tracking_reads, + uint32_t *access_tracking_writes) +{ + return memory_entry_access_tracking_internal(entry_port, + access_tracking, + access_tracking_reads, + access_tracking_writes); +} + +kern_return_t +memory_entry_access_tracking_internal( + ipc_port_t entry_port, + int *access_tracking, + uint32_t *access_tracking_reads, + uint32_t *access_tracking_writes) +{ + vm_named_entry_t mem_entry; + vm_object_t object; + kern_return_t kr; + + if (!IP_VALID(entry_port) || + ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { + return KERN_INVALID_ARGUMENT; + } + + mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port); + + named_entry_lock(mem_entry); + + if (mem_entry->is_sub_map || + mem_entry->is_copy) { + named_entry_unlock(mem_entry); + return KERN_INVALID_ARGUMENT; + } + + object = mem_entry->backing.object; + if (object == VM_OBJECT_NULL) { + named_entry_unlock(mem_entry); + return KERN_INVALID_ARGUMENT; + } + +#if VM_OBJECT_ACCESS_TRACKING + vm_object_access_tracking(object, + access_tracking, + access_tracking_reads, + access_tracking_writes); + kr = KERN_SUCCESS; +#else /* VM_OBJECT_ACCESS_TRACKING */ + (void) access_tracking; + (void) access_tracking_reads; + (void) access_tracking_writes; + kr = KERN_NOT_SUPPORTED; +#endif /* VM_OBJECT_ACCESS_TRACKING */ + + named_entry_unlock(mem_entry); + + return kr; +} + +kern_return_t +mach_memory_entry_ownership( + ipc_port_t entry_port, + task_t owner, + int ledger_tag, + int ledger_flags) +{ + task_t cur_task; + kern_return_t kr; + vm_named_entry_t mem_entry; + vm_object_t object; + + cur_task = current_task(); + if (cur_task != kernel_task && + (owner != cur_task || + (ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) || + ledger_tag == VM_LEDGER_TAG_NETWORK)) { + /* + * An entitlement is required to: + * + tranfer memory ownership to someone else, + * + request that the memory not count against the footprint, + * + tag as "network" (since that implies "no footprint") + */ + if (!cur_task->task_can_transfer_memory_ownership && + IOTaskHasEntitlement(cur_task, + "com.apple.private.memory.ownership_transfer")) { + cur_task->task_can_transfer_memory_ownership = TRUE; + } + if (!cur_task->task_can_transfer_memory_ownership) { + return KERN_NO_ACCESS; + } + } + + if (ledger_flags & ~VM_LEDGER_FLAGS) { + return KERN_INVALID_ARGUMENT; + } + if (ledger_tag <= 0 || + ledger_tag > VM_LEDGER_TAG_MAX) { + return KERN_INVALID_ARGUMENT; + } + + if (!IP_VALID(entry_port) || + ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { + return KERN_INVALID_ARGUMENT; + } + mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port); + + named_entry_lock(mem_entry); + + if (mem_entry->is_sub_map || + mem_entry->is_copy) { + named_entry_unlock(mem_entry); + return KERN_INVALID_ARGUMENT; + } + + object = mem_entry->backing.object; + if (object == VM_OBJECT_NULL) { + named_entry_unlock(mem_entry); + return KERN_INVALID_ARGUMENT; + } + + vm_object_lock(object); + + /* check that named entry covers entire object ? 
*/ + if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) { + vm_object_unlock(object); + named_entry_unlock(mem_entry); + return KERN_INVALID_ARGUMENT; + } + + named_entry_unlock(mem_entry); + + kr = vm_object_ownership_change(object, + ledger_tag, + owner, + ledger_flags, + FALSE); /* task_objq_locked */ + vm_object_unlock(object); + + return kr; +} + +kern_return_t +mach_memory_entry_get_page_counts( + ipc_port_t entry_port, + unsigned int *resident_page_count, + unsigned int *dirty_page_count) +{ + kern_return_t kr; + vm_named_entry_t mem_entry; + vm_object_t object; + vm_object_offset_t offset; + vm_object_size_t size; + + if (!IP_VALID(entry_port) || + ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { + return KERN_INVALID_ARGUMENT; + } + + mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port); + + named_entry_lock(mem_entry); + + if (mem_entry->is_sub_map || + mem_entry->is_copy) { + named_entry_unlock(mem_entry); + return KERN_INVALID_ARGUMENT; + } + + object = mem_entry->backing.object; + if (object == VM_OBJECT_NULL) { + named_entry_unlock(mem_entry); + return KERN_INVALID_ARGUMENT; + } + + vm_object_lock(object); + + offset = mem_entry->offset; + size = mem_entry->size; + + named_entry_unlock(mem_entry); + + kr = vm_object_get_page_counts(object, offset, size, resident_page_count, dirty_page_count); + + vm_object_unlock(object); + + return kr; +} + /* * mach_memory_entry_port_release: * @@ -2678,7 +3881,7 @@ mach_memory_entry_purgable_control( */ void mach_memory_entry_port_release( - ipc_port_t port) + ipc_port_t port) { assert(ip_kotype(port) == IKOT_NAMED_ENTRY); ipc_port_release_send(port); @@ -2698,29 +3901,43 @@ mach_memory_entry_port_release( */ void mach_destroy_memory_entry( - ipc_port_t port) + ipc_port_t port) { - vm_named_entry_t named_entry; + vm_named_entry_t named_entry; #if MACH_ASSERT assert(ip_kotype(port) == IKOT_NAMED_ENTRY); #endif /* MACH_ASSERT */ - named_entry = (vm_named_entry_t)port->ip_kobject; - lck_mtx_lock(&(named_entry)->Lock); + named_entry = (vm_named_entry_t) ip_get_kobject(port); + + named_entry_lock(named_entry); named_entry->ref_count -= 1; - if(named_entry->ref_count == 0) { + + if (named_entry->ref_count == 0) { if (named_entry->is_sub_map) { vm_map_deallocate(named_entry->backing.map); - } else if (!named_entry->is_pager) { - /* release the memory object we've been pointing to */ + } else if (named_entry->is_copy) { + vm_map_copy_discard(named_entry->backing.copy); + } else { + /* release the VM object we've been pointing to */ vm_object_deallocate(named_entry->backing.object); - } /* else JMM - need to drop reference on pager in that case */ - - lck_mtx_unlock(&(named_entry)->Lock); + } - kfree((void *) port->ip_kobject, - sizeof (struct vm_named_entry)); - } else - lck_mtx_unlock(&(named_entry)->Lock); + named_entry_unlock(named_entry); + named_entry_lock_destroy(named_entry); + +#if VM_NAMED_ENTRY_LIST + lck_mtx_lock_spin(&vm_named_entry_list_lock_data); + queue_remove(&vm_named_entry_list, named_entry, + vm_named_entry_t, named_entry_list); + assert(vm_named_entry_count > 0); + vm_named_entry_count--; + lck_mtx_unlock(&vm_named_entry_list_lock_data); +#endif /* VM_NAMED_ENTRY_LIST */ + + kfree(named_entry, sizeof(struct vm_named_entry)); + } else { + named_entry_unlock(named_entry); + } } /* Allow manipulation of individual page state. 
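mach_destroy_memory_entry() above runs when the entry's reference count drains; from user space the trigger is simply dropping the last send right, which fires the no-senders notification registered at allocation time (IPC_KOBJECT_ALLOC_NSREQUEST) and ultimately lands here. Minimal sketch:

#include <mach/mach.h>

static void
drop_entry(mach_port_t entry)
{
    /* Last send right gone => no-senders => backing object released. */
    (void)mach_port_deallocate(mach_task_self(), entry);
}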
This is actually part of */ @@ -2728,26 +3945,27 @@ mach_destroy_memory_entry( kern_return_t mach_memory_entry_page_op( - ipc_port_t entry_port, - vm_object_offset_t offset, - int ops, - ppnum_t *phys_entry, - int *flags) + ipc_port_t entry_port, + vm_object_offset_t offset, + int ops, + ppnum_t *phys_entry, + int *flags) { - vm_named_entry_t mem_entry; - vm_object_t object; - kern_return_t kr; + vm_named_entry_t mem_entry; + vm_object_t object; + kern_return_t kr; - if (entry_port == IP_NULL || + if (!IP_VALID(entry_port) || ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { return KERN_INVALID_ARGUMENT; } - mem_entry = (vm_named_entry_t) entry_port->ip_kobject; + mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port); named_entry_lock(mem_entry); - if (mem_entry->is_sub_map || mem_entry->is_pager) { + if (mem_entry->is_sub_map || + mem_entry->is_copy) { named_entry_unlock(mem_entry); return KERN_INVALID_ARGUMENT; } @@ -2763,43 +3981,44 @@ mach_memory_entry_page_op( kr = vm_object_page_op(object, offset, ops, phys_entry, flags); - vm_object_deallocate(object); + vm_object_deallocate(object); return kr; } /* - * mach_memory_entry_range_op offers performance enhancement over - * mach_memory_entry_page_op for page_op functions which do not require page - * level state to be returned from the call. Page_op was created to provide - * a low-cost alternative to page manipulation via UPLs when only a single - * page was involved. The range_op call establishes the ability in the _op + * mach_memory_entry_range_op offers performance enhancement over + * mach_memory_entry_page_op for page_op functions which do not require page + * level state to be returned from the call. Page_op was created to provide + * a low-cost alternative to page manipulation via UPLs when only a single + * page was involved. The range_op call establishes the ability in the _op * family of functions to work on multiple pages where the lack of page level * state handling allows the caller to avoid the overhead of the upl structures. 
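Per the comment above, range_op exists so multi-page operations can skip per-page result reporting; only a count of bytes processed comes back. The call shape, hedged (UPL_ROP_* flags from <mach/memory_object_types.h>; the user-space availability of this MIG stub varies by SDK):

#include <mach/mach.h>
#include <mach/memory_object_types.h>

/* Only "*bytes_processed" comes back, which is what lets range_op
 * batch pages without per-page result buffers. */
static kern_return_t
dump_entry_range(mach_port_t entry, memory_object_offset_t beg,
    memory_object_offset_t end, int *bytes_processed)
{
    return mach_memory_entry_range_op(entry, beg, end, UPL_ROP_DUMP,
        bytes_processed);
}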
*/ kern_return_t mach_memory_entry_range_op( - ipc_port_t entry_port, - vm_object_offset_t offset_beg, - vm_object_offset_t offset_end, + ipc_port_t entry_port, + vm_object_offset_t offset_beg, + vm_object_offset_t offset_end, int ops, int *range) { - vm_named_entry_t mem_entry; - vm_object_t object; - kern_return_t kr; + vm_named_entry_t mem_entry; + vm_object_t object; + kern_return_t kr; - if (entry_port == IP_NULL || + if (!IP_VALID(entry_port) || ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { return KERN_INVALID_ARGUMENT; } - mem_entry = (vm_named_entry_t) entry_port->ip_kobject; + mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port); named_entry_lock(mem_entry); - if (mem_entry->is_sub_map || mem_entry->is_pager) { + if (mem_entry->is_sub_map || + mem_entry->is_copy) { named_entry_unlock(mem_entry); return KERN_INVALID_ARGUMENT; } @@ -2814,45 +4033,16 @@ mach_memory_entry_range_op( named_entry_unlock(mem_entry); kr = vm_object_range_op(object, - offset_beg, - offset_end, - ops, - (uint32_t *) range); + offset_beg, + offset_end, + ops, + (uint32_t *) range); vm_object_deallocate(object); return kr; } - -kern_return_t -set_dp_control_port( - host_priv_t host_priv, - ipc_port_t control_port) -{ - if (host_priv == HOST_PRIV_NULL) - return (KERN_INVALID_HOST); - - if (IP_VALID(dynamic_pager_control_port)) - ipc_port_release_send(dynamic_pager_control_port); - - dynamic_pager_control_port = control_port; - return KERN_SUCCESS; -} - -kern_return_t -get_dp_control_port( - host_priv_t host_priv, - ipc_port_t *control_port) -{ - if (host_priv == HOST_PRIV_NULL) - return (KERN_INVALID_HOST); - - *control_port = ipc_port_copy_send(dynamic_pager_control_port); - return KERN_SUCCESS; - -} - /* ******* Temporary Internal calls to UPL for BSD ***** */ extern int kernel_upl_map( @@ -2867,15 +4057,15 @@ extern int kernel_upl_unmap( extern int kernel_upl_commit( upl_t upl, upl_page_info_t *pl, - mach_msg_type_number_t count); + mach_msg_type_number_t count); extern int kernel_upl_commit_range( upl_t upl, upl_offset_t offset, - upl_size_t size, - int flags, - upl_page_info_array_t pl, - mach_msg_type_number_t count); + upl_size_t size, + int flags, + upl_page_info_array_t pl, + mach_msg_type_number_t count); extern int kernel_upl_abort( upl_t upl, @@ -2890,9 +4080,9 @@ extern int kernel_upl_abort_range( kern_return_t kernel_upl_map( - vm_map_t map, - upl_t upl, - vm_offset_t *dst_addr) + vm_map_t map, + upl_t upl, + vm_offset_t *dst_addr) { return vm_upl_map(map, upl, dst_addr); } @@ -2900,8 +4090,8 @@ kernel_upl_map( kern_return_t kernel_upl_unmap( - vm_map_t map, - upl_t upl) + vm_map_t map, + upl_t upl) { return vm_upl_unmap(map, upl); } @@ -2912,7 +4102,7 @@ kernel_upl_commit( upl_page_info_t *pl, mach_msg_type_number_t count) { - kern_return_t kr; + kern_return_t kr; kr = upl_commit(upl, pl, count); upl_deallocate(upl); @@ -2922,18 +4112,19 @@ kernel_upl_commit( kern_return_t kernel_upl_commit_range( - upl_t upl, - upl_offset_t offset, - upl_size_t size, - int flags, + upl_t upl, + upl_offset_t offset, + upl_size_t size, + int flags, upl_page_info_array_t pl, mach_msg_type_number_t count) { - boolean_t finished = FALSE; - kern_return_t kr; + boolean_t finished = FALSE; + kern_return_t kr; - if (flags & UPL_COMMIT_FREE_ON_EMPTY) + if (flags & UPL_COMMIT_FREE_ON_EMPTY) { flags |= UPL_COMMIT_NOTIFY_EMPTY; + } if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) { return KERN_INVALID_ARGUMENT; @@ -2941,39 +4132,42 @@ kernel_upl_commit_range( kr = upl_commit_range(upl, offset, size, flags, pl, count, 
-
-kern_return_t
-set_dp_control_port(
-	host_priv_t	host_priv,
-	ipc_port_t	control_port)
-{
-	if (host_priv == HOST_PRIV_NULL)
-		return (KERN_INVALID_HOST);
-
-	if (IP_VALID(dynamic_pager_control_port))
-		ipc_port_release_send(dynamic_pager_control_port);
-
-	dynamic_pager_control_port = control_port;
-	return KERN_SUCCESS;
-}
-
-kern_return_t
-get_dp_control_port(
-	host_priv_t	host_priv,
-	ipc_port_t	*control_port)
-{
-	if (host_priv == HOST_PRIV_NULL)
-		return (KERN_INVALID_HOST);
-
-	*control_port = ipc_port_copy_send(dynamic_pager_control_port);
-	return KERN_SUCCESS;
-
-}
-
 /* ******* Temporary Internal calls to UPL for BSD ***** */

 extern int kernel_upl_map(
@@ -2867,15 +4057,15 @@ extern int kernel_upl_unmap(

 extern int kernel_upl_commit(
 	upl_t                   upl,
 	upl_page_info_t         *pl,
-	mach_msg_type_number_t	count);
+	mach_msg_type_number_t  count);

 extern int kernel_upl_commit_range(
 	upl_t                   upl,
 	upl_offset_t            offset,
-	upl_size_t		size,
-	int			flags,
-	upl_page_info_array_t	pl,
-	mach_msg_type_number_t	count);
+	upl_size_t              size,
+	int                     flags,
+	upl_page_info_array_t   pl,
+	mach_msg_type_number_t  count);

 extern int kernel_upl_abort(
 	upl_t                   upl,
@@ -2890,9 +4080,9 @@ extern int kernel_upl_abort_range(

 kern_return_t
 kernel_upl_map(
-	vm_map_t	map,
-	upl_t		upl,
-	vm_offset_t	*dst_addr)
+	vm_map_t        map,
+	upl_t           upl,
+	vm_offset_t     *dst_addr)
 {
 	return vm_upl_map(map, upl, dst_addr);
 }
@@ -2900,8 +4090,8 @@ kernel_upl_map(

 kern_return_t
 kernel_upl_unmap(
-	vm_map_t	map,
-	upl_t		upl)
+	vm_map_t        map,
+	upl_t           upl)
 {
 	return vm_upl_unmap(map, upl);
 }
@@ -2912,7 +4102,7 @@ kernel_upl_commit(
 	upl_page_info_t         *pl,
 	mach_msg_type_number_t  count)
 {
-	kern_return_t 	kr;
+	kern_return_t   kr;

 	kr = upl_commit(upl, pl, count);
 	upl_deallocate(upl);
@@ -2922,18 +4112,19 @@ kernel_upl_commit(

 kern_return_t
 kernel_upl_commit_range(
-	upl_t 			upl,
-	upl_offset_t		offset,
-	upl_size_t		size,
-	int			flags,
+	upl_t                   upl,
+	upl_offset_t            offset,
+	upl_size_t              size,
+	int                     flags,
 	upl_page_info_array_t   pl,
 	mach_msg_type_number_t  count)
 {
-	boolean_t		finished = FALSE;
-	kern_return_t 		kr;
+	boolean_t               finished = FALSE;
+	kern_return_t           kr;

-	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
+	if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
 		flags |= UPL_COMMIT_NOTIFY_EMPTY;
+	}

 	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
 		return KERN_INVALID_ARGUMENT;
@@ -2941,39 +4132,42 @@ kernel_upl_commit_range(

 	kr = upl_commit_range(upl, offset, size, flags, pl, count,
 	    &finished);

-	if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
+	if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished) {
 		upl_deallocate(upl);
+	}

 	return kr;
 }
- 
+
 kern_return_t
 kernel_upl_abort_range(
-	upl_t			upl,
-	upl_offset_t		offset,
-	upl_size_t		size,
-	int			abort_flags)
+	upl_t                   upl,
+	upl_offset_t            offset,
+	upl_size_t              size,
+	int                     abort_flags)
 {
-	kern_return_t 		kr;
-	boolean_t		finished = FALSE;
+	kern_return_t           kr;
+	boolean_t               finished = FALSE;

-	if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
+	if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY) {
 		abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
+	}

 	kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

-	if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
+	if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished) {
 		upl_deallocate(upl);
+	}

 	return kr;
 }
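
kernel_upl_commit_range() and kernel_upl_abort_range() above share one
ownership rule: with UPL_COMMIT_FREE_ON_EMPTY the wrapper deallocates
the UPL once the operation leaves it empty.  A sketch of the calling
pattern, where upl, pl and count are assumed to come from an earlier
UPL creation:

/*
 * Hypothetical caller: commit the first page of a UPL.
 * UPL_COMMIT_FREE_ON_EMPTY implies UPL_COMMIT_NOTIFY_EMPTY in the
 * wrapper, so if this commit empties the UPL it is deallocated before
 * the call returns and must not be referenced again.
 */
static kern_return_t
commit_first_page(
	upl_t                   upl,
	upl_page_info_array_t   pl,
	mach_msg_type_number_t  count)
{
	return kernel_upl_commit_range(upl, 0, PAGE_SIZE,
	    UPL_COMMIT_FREE_ON_EMPTY, pl, count);
}
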

 kern_return_t
 kernel_upl_abort(
-	upl_t			upl,
-	int			abort_type)
+	upl_t                   upl,
+	int                     abort_type)
 {
-	kern_return_t	kr;
+	kern_return_t   kr;

 	kr = upl_abort(upl, abort_type);
 	upl_deallocate(upl);
@@ -2988,15 +4182,15 @@ kernel_upl_abort(

 kern_return_t
 vm_region_object_create(
-	__unused vm_map_t	target_map,
-	vm_size_t		size,
-	ipc_port_t		*object_handle)
+	__unused vm_map_t       target_map,
+	vm_size_t               size,
+	ipc_port_t              *object_handle)
 {
-	vm_named_entry_t	user_entry;
-	ipc_port_t		user_handle;
+	vm_named_entry_t        user_entry;
+	ipc_port_t              user_handle;
+
+	vm_map_t                new_map;

-	vm_map_t		new_map;
-
 	if (mach_memory_entry_allocate(&user_entry, &user_handle)
 	    != KERN_SUCCESS) {
 		return KERN_FAILURE;
@@ -3005,7 +4199,10 @@ vm_region_object_create(

 	/* Create a named object based on a submap of specified size */

 	new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
-				vm_map_round_page(size), TRUE);
+	    vm_map_round_page(size,
+	    VM_MAP_PAGE_MASK(target_map)),
+	    TRUE);
+	vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));

 	user_entry->backing.map = new_map;
 	user_entry->internal = TRUE;
@@ -3017,73 +4214,73 @@ vm_region_object_create(

 	*object_handle = user_handle;
 	return KERN_SUCCESS;
-
 }

-ppnum_t vm_map_get_phys_page(	/* forward */
-	vm_map_t	map,
-	vm_offset_t	offset);
+ppnum_t vm_map_get_phys_page(           /* forward */
+	vm_map_t        map,
+	vm_offset_t     offset);

 ppnum_t
 vm_map_get_phys_page(
-	vm_map_t	map,
-	vm_offset_t	addr)
+	vm_map_t        map,
+	vm_offset_t     addr)
 {
-	vm_object_offset_t	offset;
-	vm_object_t		object;
-	vm_map_offset_t 	map_offset;
-	vm_map_entry_t		entry;
-	ppnum_t			phys_page = 0;
+	vm_object_offset_t      offset;
+	vm_object_t             object;
+	vm_map_offset_t         map_offset;
+	vm_map_entry_t          entry;
+	ppnum_t                 phys_page = 0;

-	map_offset = vm_map_trunc_page(addr);
+	map_offset = vm_map_trunc_page(addr, PAGE_MASK);

 	vm_map_lock(map);
 	while (vm_map_lookup_entry(map, map_offset, &entry)) {
-
-		if (entry->object.vm_object == VM_OBJECT_NULL) {
+		if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
 			vm_map_unlock(map);
 			return (ppnum_t) 0;
 		}
 		if (entry->is_sub_map) {
-			vm_map_t	old_map;
-			vm_map_lock(entry->object.sub_map);
+			vm_map_t        old_map;
+			vm_map_lock(VME_SUBMAP(entry));
 			old_map = map;
-			map = entry->object.sub_map;
-			map_offset = entry->offset + (map_offset - entry->vme_start);
+			map = VME_SUBMAP(entry);
+			map_offset = (VME_OFFSET(entry) +
+			    (map_offset - entry->vme_start));
 			vm_map_unlock(old_map);
 			continue;
 		}
-		if (entry->object.vm_object->phys_contiguous) {
+		if (VME_OBJECT(entry)->phys_contiguous) {
 			/* These are not standard pageable memory mappings */
 			/* If they are not present in the object they will */
 			/* have to be picked up from the pager through the */
 			/* fault mechanism.  */
-			if(entry->object.vm_object->shadow_offset == 0) {
+			if (VME_OBJECT(entry)->vo_shadow_offset == 0) {
 				/* need to call vm_fault */
 				vm_map_unlock(map);
-				vm_fault(map, map_offset, VM_PROT_NONE,
-					FALSE, THREAD_UNINT, NULL, 0);
+				vm_fault(map, map_offset, VM_PROT_NONE,
+				    FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
+				    THREAD_UNINT, NULL, 0);
 				vm_map_lock(map);
 				continue;
 			}
-			offset = entry->offset + (map_offset - entry->vme_start);
+			offset = (VME_OFFSET(entry) +
+			    (map_offset - entry->vme_start));
 			phys_page = (ppnum_t)
-				((entry->object.vm_object->shadow_offset
-							+ offset) >> 12);
+			    ((VME_OBJECT(entry)->vo_shadow_offset
+			    + offset) >> PAGE_SHIFT);
 			break;
-
 		}
-		offset = entry->offset + (map_offset - entry->vme_start);
-		object = entry->object.vm_object;
+		offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));
+		object = VME_OBJECT(entry);
 		vm_object_lock(object);
 		while (TRUE) {
-			vm_page_t dst_page = vm_page_lookup(object,offset);
-			if(dst_page == VM_PAGE_NULL) {
-				if(object->shadow) {
+			vm_page_t dst_page = vm_page_lookup(object, offset);
+			if (dst_page == VM_PAGE_NULL) {
+				if (object->shadow) {
 					vm_object_t old_object;
 					vm_object_lock(object->shadow);
 					old_object = object;
-					offset = offset + object->shadow_offset;
+					offset = offset + object->vo_shadow_offset;
 					object = object->shadow;
 					vm_object_unlock(old_object);
 				} else {
@@ -3091,44 +4288,42 @@ vm_map_get_phys_page(
 					break;
 				}
 			} else {
-				phys_page = (ppnum_t)(dst_page->phys_page);
+				phys_page = (ppnum_t)(VM_PAGE_GET_PHYS_PAGE(dst_page));
 				vm_object_unlock(object);
 				break;
 			}
 		}
 		break;
-
-	}
+	}

 	vm_map_unlock(map);
 	return phys_page;
 }
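
vm_map_get_phys_page() walks submaps and shadow chains internally, so a
caller sees only a page number, with 0 meaning no page is resident at
the address.  A sketch of a caller; the helper name is invented and the
byte-address conversion assumes the usual PAGE_SHIFT/PAGE_MASK
relationship:

/* Hypothetical helper: virtual address -> physical byte address. */
static uint64_t
phys_addr_of(
	vm_map_t        map,
	vm_offset_t     addr)
{
	ppnum_t pn;

	pn = vm_map_get_phys_page(map, addr);
	if (pn == 0) {
		return 0;       /* nothing resident at addr */
	}
	/* page number to byte address, plus the offset within the page */
	return ((uint64_t)pn << PAGE_SHIFT) | (addr & PAGE_MASK);
}
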
-
-
-kern_return_t kernel_object_iopl_request(	/* forward */
-	vm_named_entry_t	named_entry,
-	memory_object_offset_t	offset,
-	upl_size_t		*upl_size,
-	upl_t			*upl_ptr,
-	upl_page_info_array_t	user_page_list,
-	unsigned int		*page_list_count,
-	int			*flags);
+#if 0
+kern_return_t kernel_object_iopl_request(       /* forward */
+	vm_named_entry_t        named_entry,
+	memory_object_offset_t  offset,
+	upl_size_t              *upl_size,
+	upl_t                   *upl_ptr,
+	upl_page_info_array_t   user_page_list,
+	unsigned int            *page_list_count,
+	int                     *flags);

 kern_return_t
 kernel_object_iopl_request(
-	vm_named_entry_t	named_entry,
-	memory_object_offset_t	offset,
-	upl_size_t		*upl_size,
-	upl_t			*upl_ptr,
-	upl_page_info_array_t	user_page_list,
-	unsigned int		*page_list_count,
-	int			*flags)
+	vm_named_entry_t        named_entry,
+	memory_object_offset_t  offset,
+	upl_size_t              *upl_size,
+	upl_t                   *upl_ptr,
+	upl_page_info_array_t   user_page_list,
+	unsigned int            *page_list_count,
+	int                     *flags)
 {
-	vm_object_t		object;
-	kern_return_t		ret;
+	vm_object_t             object;
+	kern_return_t           ret;

-	int			caller_flags;
+	int                     caller_flags;

 	caller_flags = *flags;

@@ -3141,83 +4336,56 @@ kernel_object_iopl_request(
 	}

 	/* a few checks to make sure user is obeying rules */
-	if(*upl_size == 0) {
-		if(offset >= named_entry->size)
-			return(KERN_INVALID_RIGHT);
+	if (*upl_size == 0) {
+		if (offset >= named_entry->size) {
+			return KERN_INVALID_RIGHT;
+		}
 		*upl_size = (upl_size_t) (named_entry->size - offset);
-		if (*upl_size != named_entry->size - offset)
+		if (*upl_size != named_entry->size - offset) {
 			return KERN_INVALID_ARGUMENT;
+		}
 	}
-	if(caller_flags & UPL_COPYOUT_FROM) {
-		if((named_entry->protection & VM_PROT_READ)
-			!= VM_PROT_READ) {
-			return(KERN_INVALID_RIGHT);
+	if (caller_flags & UPL_COPYOUT_FROM) {
+		if ((named_entry->protection & VM_PROT_READ)
+		    != VM_PROT_READ) {
+			return KERN_INVALID_RIGHT;
 		}
 	} else {
-		if((named_entry->protection &
-			(VM_PROT_READ | VM_PROT_WRITE))
-			!= (VM_PROT_READ | VM_PROT_WRITE)) {
-			return(KERN_INVALID_RIGHT);
+		if ((named_entry->protection &
+		    (VM_PROT_READ | VM_PROT_WRITE))
+		    != (VM_PROT_READ | VM_PROT_WRITE)) {
+			return KERN_INVALID_RIGHT;
 		}
 	}
-	if(named_entry->size < (offset + *upl_size))
-		return(KERN_INVALID_ARGUMENT);
+	if (named_entry->size < (offset + *upl_size)) {
+		return KERN_INVALID_ARGUMENT;
+	}

 	/* the callers parameter offset is defined to be the */
 	/* offset from beginning of named entry offset in object */
 	offset = offset + named_entry->offset;

-	if(named_entry->is_sub_map)
-		return (KERN_INVALID_ARGUMENT);
-
-	named_entry_lock(named_entry);
-
-	if (named_entry->is_pager) {
-		object = vm_object_enter(named_entry->backing.pager,
-				named_entry->offset + named_entry->size,
-				named_entry->internal,
-				FALSE,
-				FALSE);
-		if (object == VM_OBJECT_NULL) {
-			named_entry_unlock(named_entry);
-			return(KERN_INVALID_OBJECT);
-		}
-
-		/* JMM - drop reference on the pager here? */
-
-		/* create an extra reference for the object */
-		vm_object_lock(object);
-		vm_object_reference_locked(object);
-		named_entry->backing.object = object;
-		named_entry->is_pager = FALSE;
-		named_entry_unlock(named_entry);
+	if (named_entry->is_sub_map ||
+	    named_entry->is_copy) {
+		return KERN_INVALID_ARGUMENT;
+	}

-		/* wait for object (if any) to be ready */
-		if (!named_entry->internal) {
-			while (!object->pager_ready) {
-				vm_object_wait(object,
-					VM_OBJECT_EVENT_PAGER_READY,
-					THREAD_UNINT);
-				vm_object_lock(object);
-			}
-		}
-		vm_object_unlock(object);
+	named_entry_lock(named_entry);

-	} else {
-		/* This is the case where we are going to operate */
-		/* an an already known object.  If the object is */
-		/* not ready it is internal.  An external */
-		/* object cannot be mapped until it is ready */
-		/* we can therefore avoid the ready check */
-		/* in this case.  */
-		object = named_entry->backing.object;
-		vm_object_reference(object);
-		named_entry_unlock(named_entry);
-	}
+	/* This is the case where we are going to operate */
+	/* on an already known object.  If the object is */
+	/* not ready it is internal.  An external */
+	/* object cannot be mapped until it is ready */
+	/* we can therefore avoid the ready check */
+	/* in this case.  */
+	object = named_entry->backing.object;
+	vm_object_reference(object);
+	named_entry_unlock(named_entry);

 	if (!object->private) {
-		if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
-			*upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
+		if (*upl_size > MAX_UPL_TRANSFER_BYTES) {
+			*upl_size = MAX_UPL_TRANSFER_BYTES;
+		}
 		if (object->phys_contiguous) {
 			*flags = UPL_PHYS_CONTIG;
 		} else {
@@ -3228,12 +4396,123 @@ kernel_object_iopl_request(
 	}

 	ret = vm_object_iopl_request(object,
-		offset,
-		*upl_size,
-		upl_ptr,
-		user_page_list,
-		page_list_count,
-		caller_flags);
+	    offset,
+	    *upl_size,
+	    upl_ptr,
+	    user_page_list,
+	    page_list_count,
+	    (upl_control_flags_t)(unsigned int)caller_flags);
 	vm_object_deallocate(object);
 	return ret;
 }
+#endif
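
Although kernel_object_iopl_request() is now compiled out, the
permission rule it enforced is a compact summary of the checks above: a
copy-out (pageout) request needs only read access to the named entry,
while anything else needs both read and write.  Restated as a
hypothetical predicate, taken directly from those checks:

static boolean_t
iopl_access_ok(
	vm_prot_t       prot,           /* named_entry->protection */
	int             caller_flags)
{
	if (caller_flags & UPL_COPYOUT_FROM) {
		return (prot & VM_PROT_READ) == VM_PROT_READ;
	}
	return (prot & (VM_PROT_READ | VM_PROT_WRITE)) ==
	       (VM_PROT_READ | VM_PROT_WRITE);
}
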
+
+/*
+ * These symbols are looked up at runtime by vmware, VirtualBox,
+ * despite not being exported in the symbol sets.
+ */
+
+#if defined(__x86_64__)
+
+kern_return_t
+mach_vm_map(
+	vm_map_t                target_map,
+	mach_vm_offset_t        *address,
+	mach_vm_size_t          initial_size,
+	mach_vm_offset_t        mask,
+	int                     flags,
+	ipc_port_t              port,
+	vm_object_offset_t      offset,
+	boolean_t               copy,
+	vm_prot_t               cur_protection,
+	vm_prot_t               max_protection,
+	vm_inherit_t            inheritance);
+
+kern_return_t
+mach_vm_remap(
+	vm_map_t                target_map,
+	mach_vm_offset_t        *address,
+	mach_vm_size_t          size,
+	mach_vm_offset_t        mask,
+	int                     flags,
+	vm_map_t                src_map,
+	mach_vm_offset_t        memory_address,
+	boolean_t               copy,
+	vm_prot_t               *cur_protection,
+	vm_prot_t               *max_protection,
+	vm_inherit_t            inheritance);
+
+kern_return_t
+mach_vm_map(
+	vm_map_t                target_map,
+	mach_vm_offset_t        *address,
+	mach_vm_size_t          initial_size,
+	mach_vm_offset_t        mask,
+	int                     flags,
+	ipc_port_t              port,
+	vm_object_offset_t      offset,
+	boolean_t               copy,
+	vm_prot_t               cur_protection,
+	vm_prot_t               max_protection,
+	vm_inherit_t            inheritance)
+{
+	return mach_vm_map_external(target_map, address, initial_size, mask, flags, port,
+	    offset, copy, cur_protection, max_protection, inheritance);
+}
+
+kern_return_t
+mach_vm_remap(
+	vm_map_t                target_map,
+	mach_vm_offset_t        *address,
+	mach_vm_size_t          size,
+	mach_vm_offset_t        mask,
+	int                     flags,
+	vm_map_t                src_map,
+	mach_vm_offset_t        memory_address,
+	boolean_t               copy,
+	vm_prot_t               *cur_protection,
+	vm_prot_t               *max_protection,
+	vm_inherit_t            inheritance)
+{
+	return mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address,
+	    copy, cur_protection, max_protection, inheritance);
+}
+
+kern_return_t
+vm_map(
+	vm_map_t                target_map,
+	vm_offset_t             *address,
+	vm_size_t               size,
+	vm_offset_t             mask,
+	int                     flags,
+	ipc_port_t              port,
+	vm_offset_t             offset,
+	boolean_t               copy,
+	vm_prot_t               cur_protection,
+	vm_prot_t               max_protection,
+	vm_inherit_t            inheritance);
+
+kern_return_t
+vm_map(
+	vm_map_t                target_map,
+	vm_offset_t             *address,
+	vm_size_t               size,
+	vm_offset_t             mask,
+	int                     flags,
+	ipc_port_t              port,
+	vm_offset_t             offset,
+	boolean_t               copy,
+	vm_prot_t               cur_protection,
+	vm_prot_t               max_protection,
+	vm_inherit_t            inheritance)
+{
+	vm_tag_t tag;
+
+	VM_GET_FLAGS_ALIAS(flags, tag);
+	return vm_map_kernel(target_map, address, size, mask,
+	    flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
+	    port, offset, copy,
+	    cur_protection, max_protection, inheritance);
+}
+
+#endif /* __x86_64__ */
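
These shims exist because the real entry points now split a VM tag out
of the caller's flags word.  A sketch of that round trip, assuming the
common encoding from <mach/vm_statistics.h> (a tag packed into the top
byte of flags by VM_MAKE_TAG and recovered by VM_GET_FLAGS_ALIAS):

static void
tag_roundtrip_example(void)
{
	int             flags;
	vm_tag_t        tag;

	flags = VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_STACK);

	/* The *_external entry points perform exactly this split. */
	VM_GET_FLAGS_ALIAS(flags, tag);
	/* tag now holds VM_MEMORY_STACK; the low bits of flags still
	 * carry VM_FLAGS_ANYWHERE. */
}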