/*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
* User-exported virtual memory functions.
*/
+/*
+ * There are three implementations of the "XXX_allocate" functionality in
+ * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
+ * (for a task with the same address space size, especially the current task),
+ * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
+ * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
+ * makes sense on platforms where a user task can either be 32 or 64, or the kernel
+ * task can be 32 or 64. mach_vm_allocate makes sense everywhere, and is preferred
+ * for new code.
+ *
+ * The entrypoints into the kernel are more complex. All platforms support a
+ * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
+ * size types for the platform. On platforms that only support U32/K32,
+ * subsystem 4800 is all you need. On platforms that support both U32 and U64,
+ * subsystem 3800 is used to disambiguate the size of parameters, and they will
+ * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32 platforms,
+ * the MIG glue should never call into vm_allocate directly, because the calling
+ * task and kernel_task are unlikely to use the same size parameters.
+ *
+ * New VM call implementations should be added here and to mach_vm.defs
+ * (subsystem 4800), and use mach_vm_* "wide" types.
+ */
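+
+/*
+ * For illustration only: a typical user-space caller of the preferred
+ * "wide" interface (subsystem 4800) would look roughly like the sketch
+ * below, assuming the standard mach_vm_allocate() MIG stub, mach_task_self(),
+ * and the vm_page_size global (with the appropriate Mach headers, e.g.
+ * <mach/mach.h> and <mach/mach_vm.h>):
+ *
+ *	mach_vm_address_t addr = 0;
+ *	kern_return_t kr;
+ *
+ *	kr = mach_vm_allocate(mach_task_self(), &addr,
+ *	                      vm_page_size, VM_FLAGS_ANYWHERE);
+ *	if (kr == KERN_SUCCESS) {
+ *		// addr is a page-aligned, zero-filled region of vm_page_size bytes
+ *	}
+ *
+ * Because the mach_vm_* types are always 64 bits wide, the same call works
+ * whether the calling task and the kernel are 32-bit or 64-bit.
+ */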
+
#include <debug.h>
#include <vm_cpm.h>
#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
-#include <mach/shared_memory_server.h>
#include <mach/vm_map_server.h>
-#include <vm/vm_shared_memory_server.h>
#include <kern/host.h>
#include <kern/kalloc.h>
vm_map_offset_t map_addr;
vm_map_size_t map_size;
kern_return_t result;
- boolean_t anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
+ boolean_t anywhere;
+
+ /* filter out any kernel-only flags */
+ if (flags & ~VM_FLAGS_USER_ALLOCATE)
+ return KERN_INVALID_ARGUMENT;
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
return(KERN_SUCCESS);
}
+ anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
if (anywhere) {
/*
* No specific address requested, so start candidate address
vm_map_offset_t map_addr;
vm_map_size_t map_size;
kern_return_t result;
- boolean_t anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
+ boolean_t anywhere;
+
+ /* filter out any kernel-only flags */
+ if (flags & ~VM_FLAGS_USER_ALLOCATE)
+ return KERN_INVALID_ARGUMENT;
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
return(KERN_SUCCESS);
}
+ anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
if (anywhere) {
/*
* No specific address requested, so start candidate address
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
+ if ((mach_msg_type_number_t) size != size)
+ return KERN_INVALID_ARGUMENT;
error = vm_map_copyin(map,
(vm_map_address_t)addr,
if (KERN_SUCCESS == error) {
*data = (pointer_t) ipc_address;
- *data_size = size;
+ *data_size = (mach_msg_type_number_t) size;
+ assert(*data_size == size);
}
return(error);
}
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
+ if (size > (unsigned)(mach_msg_type_number_t) -1) {
+ /*
+ * The kernel could handle a 64-bit "size" value, but
+ * it could not return the size of the data in "*data_size"
+ * without overflowing.
+ * Let's reject this "size" as invalid.
+ */
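+ /*
+  * For instance, a "size" of 0x100000000 (4GB) could not be represented
+  * in the 32-bit mach_msg_type_number_t "*data_size", so it is rejected.
+  */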
+ return KERN_INVALID_ARGUMENT;
+ }
+
error = vm_map_copyin(map,
(vm_map_address_t)addr,
(vm_map_size_t)size,
if (KERN_SUCCESS == error) {
*data = (pointer_t) ipc_address;
- *data_size = size;
+ *data_size = (mach_msg_type_number_t) size;
+ assert(*data_size == size);
}
return(error);
}
vm_prot_t max_protection,
vm_inherit_t inheritance)
{
- vm_map_address_t map_addr;
- vm_map_size_t map_size;
- vm_object_t object;
- vm_object_size_t size;
- kern_return_t result;
-
- /*
- * Check arguments for validity
- */
- if ((target_map == VM_MAP_NULL) ||
- (cur_protection & ~VM_PROT_ALL) ||
- (max_protection & ~VM_PROT_ALL) ||
- (inheritance > VM_INHERIT_LAST_VALID) ||
- initial_size == 0)
- return(KERN_INVALID_ARGUMENT);
-
- map_addr = vm_map_trunc_page(*address);
- map_size = vm_map_round_page(initial_size);
- size = vm_object_round_page(initial_size);
-
- /*
- * Find the vm object (if any) corresponding to this port.
- */
- if (!IP_VALID(port)) {
- object = VM_OBJECT_NULL;
- offset = 0;
- copy = FALSE;
- } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
- vm_named_entry_t named_entry;
-
- named_entry = (vm_named_entry_t)port->ip_kobject;
- /* a few checks to make sure user is obeying rules */
- if(size == 0) {
- if(offset >= named_entry->size)
- return(KERN_INVALID_RIGHT);
- size = named_entry->size - offset;
- }
- if((named_entry->protection & max_protection) != max_protection)
- return(KERN_INVALID_RIGHT);
- if((named_entry->protection & cur_protection) != cur_protection)
- return(KERN_INVALID_RIGHT);
- if(named_entry->size < (offset + size))
- return(KERN_INVALID_ARGUMENT);
-
- /* the callers parameter offset is defined to be the */
- /* offset from beginning of named entry offset in object */
- offset = offset + named_entry->offset;
-
- named_entry_lock(named_entry);
- if(named_entry->is_sub_map) {
- vm_map_entry_t map_entry;
-
- named_entry_unlock(named_entry);
- vm_object_reference(vm_submap_object);
- if ((result = vm_map_enter(target_map,
- &map_addr, map_size,
- (vm_map_offset_t)mask, flags,
- vm_submap_object, 0,
- FALSE,
- cur_protection, max_protection, inheritance
- )) != KERN_SUCCESS) {
- vm_object_deallocate(vm_submap_object);
- } else {
- char alias;
-
- VM_GET_FLAGS_ALIAS(flags, alias);
- if ((alias == VM_MEMORY_SHARED_PMAP) &&
- !copy) {
- vm_map_submap(target_map, map_addr,
- map_addr + map_size,
- named_entry->backing.map,
- (vm_map_offset_t)offset, TRUE);
- } else {
- vm_map_submap(target_map, map_addr,
- map_addr + map_size,
- named_entry->backing.map,
- (vm_map_offset_t)offset, FALSE);
- }
- if(copy) {
- if(vm_map_lookup_entry(
- target_map, map_addr, &map_entry)) {
- map_entry->needs_copy = TRUE;
- }
- }
- *address = map_addr;
- }
- return(result);
-
- } else if (named_entry->is_pager) {
- unsigned int access;
- vm_prot_t protections;
- unsigned int wimg_mode;
- boolean_t cache_attr;
-
- protections = named_entry->protection
- & VM_PROT_ALL;
- access = GET_MAP_MEM(named_entry->protection);
-
- object = vm_object_enter(
- named_entry->backing.pager,
- named_entry->size,
- named_entry->internal,
- FALSE,
- FALSE);
- if (object == VM_OBJECT_NULL) {
- named_entry_unlock(named_entry);
- return(KERN_INVALID_OBJECT);
- }
-
- /* JMM - drop reference on pager here */
-
- /* create an extra ref for the named entry */
- vm_object_lock(object);
- vm_object_reference_locked(object);
- named_entry->backing.object = object;
- named_entry->is_pager = FALSE;
- named_entry_unlock(named_entry);
-
- wimg_mode = object->wimg_bits;
- if(access == MAP_MEM_IO) {
- wimg_mode = VM_WIMG_IO;
- } else if (access == MAP_MEM_COPYBACK) {
- wimg_mode = VM_WIMG_USE_DEFAULT;
- } else if (access == MAP_MEM_WTHRU) {
- wimg_mode = VM_WIMG_WTHRU;
- } else if (access == MAP_MEM_WCOMB) {
- wimg_mode = VM_WIMG_WCOMB;
- }
- if ((wimg_mode == VM_WIMG_IO)
- || (wimg_mode == VM_WIMG_WCOMB))
- cache_attr = TRUE;
- else
- cache_attr = FALSE;
-
- /* wait for object (if any) to be ready */
- if (!named_entry->internal) {
- while (!object->pager_ready) {
- vm_object_wait(object,
- VM_OBJECT_EVENT_PAGER_READY,
- THREAD_UNINT);
- vm_object_lock(object);
- }
- }
-
- if(object->wimg_bits != wimg_mode) {
- vm_page_t p;
-
- vm_object_paging_wait(object, THREAD_UNINT);
-
- object->wimg_bits = wimg_mode;
- queue_iterate(&object->memq, p, vm_page_t, listq) {
- if (!p->fictitious) {
- pmap_disconnect(p->phys_page);
- if (cache_attr)
- pmap_sync_page_attributes_phys(p->phys_page);
- }
- }
- }
- object->true_share = TRUE;
- if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
- object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
- vm_object_unlock(object);
- } else {
- /* This is the case where we are going to map */
- /* an already mapped object. If the object is */
- /* not ready it is internal. An external */
- /* object cannot be mapped until it is ready */
- /* we can therefore avoid the ready check */
- /* in this case. */
- object = named_entry->backing.object;
- assert(object != VM_OBJECT_NULL);
- named_entry_unlock(named_entry);
- vm_object_reference(object);
- }
- } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
- /*
- * JMM - This is temporary until we unify named entries
- * and raw memory objects.
- *
- * Detected fake ip_kotype for a memory object. In
- * this case, the port isn't really a port at all, but
- * instead is just a raw memory object.
- */
-
- if ((object = vm_object_enter((memory_object_t)port,
- size, FALSE, FALSE, FALSE))
- == VM_OBJECT_NULL)
- return(KERN_INVALID_OBJECT);
-
- /* wait for object (if any) to be ready */
- if (object != VM_OBJECT_NULL) {
- if(object == kernel_object) {
- printf("Warning: Attempt to map kernel object"
- " by a non-private kernel entity\n");
- return(KERN_INVALID_OBJECT);
- }
- vm_object_lock(object);
- while (!object->pager_ready) {
- vm_object_wait(object,
- VM_OBJECT_EVENT_PAGER_READY,
- THREAD_UNINT);
- vm_object_lock(object);
- }
- vm_object_unlock(object);
- }
- } else {
- return (KERN_INVALID_OBJECT);
- }
-
- /*
- * Perform the copy if requested
- */
-
- if (copy) {
- vm_object_t new_object;
- vm_object_offset_t new_offset;
-
- result = vm_object_copy_strategically(object, offset, size,
- &new_object, &new_offset,
- &copy);
-
-
- if (result == KERN_MEMORY_RESTART_COPY) {
- boolean_t success;
- boolean_t src_needs_copy;
-
- /*
- * XXX
- * We currently ignore src_needs_copy.
- * This really is the issue of how to make
- * MEMORY_OBJECT_COPY_SYMMETRIC safe for
- * non-kernel users to use. Solution forthcoming.
- * In the meantime, since we don't allow non-kernel
- * memory managers to specify symmetric copy,
- * we won't run into problems here.
- */
- new_object = object;
- new_offset = offset;
- success = vm_object_copy_quickly(&new_object,
- new_offset, size,
- &src_needs_copy,
- &copy);
- assert(success);
- result = KERN_SUCCESS;
- }
- /*
- * Throw away the reference to the
- * original object, as it won't be mapped.
- */
-
- vm_object_deallocate(object);
-
- if (result != KERN_SUCCESS)
- return (result);
-
- object = new_object;
- offset = new_offset;
- }
+ /* filter out any kernel-only flags */
+ if (flags & ~VM_FLAGS_USER_MAP)
+ return KERN_INVALID_ARGUMENT;
- if ((result = vm_map_enter(target_map,
- &map_addr, map_size,
- (vm_map_offset_t)mask,
- flags,
- object, offset,
- copy,
- cur_protection, max_protection, inheritance
- )) != KERN_SUCCESS)
- vm_object_deallocate(object);
- *address = map_addr;
- return(result);
+ return vm_map_enter_mem_object(target_map,
+ address,
+ initial_size,
+ mask,
+ flags,
+ port,
+ offset,
+ copy,
+ cur_protection,
+ max_protection,
+ inheritance);
}
kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
port, offset, copy,
cur_protection, max_protection, inheritance);
- *address = CAST_DOWN(vm_address_t, map_addr);
+ *address = CAST_DOWN(vm_offset_t, map_addr);
return kr;
}
kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
port, obj_offset, copy,
cur_protection, max_protection, inheritance);
- *address = CAST_DOWN(vm_address_t, map_addr);
+ *address = CAST_DOWN(vm_offset_t, map_addr);
return kr;
}
mach_vm_offset_t *address,
mach_vm_size_t size,
mach_vm_offset_t mask,
- boolean_t anywhere,
+ int flags,
vm_map_t src_map,
mach_vm_offset_t memory_address,
boolean_t copy,
if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
return KERN_INVALID_ARGUMENT;
+ /* filter out any kernel-only flags */
+ if (flags & ~VM_FLAGS_USER_REMAP)
+ return KERN_INVALID_ARGUMENT;
+
map_addr = (vm_map_offset_t)*address;
kr = vm_map_remap(target_map,
&map_addr,
size,
mask,
- anywhere,
+ flags,
src_map,
memory_address,
copy,
vm_offset_t *address,
vm_size_t size,
vm_offset_t mask,
- boolean_t anywhere,
+ int flags,
vm_map_t src_map,
vm_offset_t memory_address,
boolean_t copy,
if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
return KERN_INVALID_ARGUMENT;
+ /* filter out any kernel-only flags */
+ if (flags & ~VM_FLAGS_USER_REMAP)
+ return KERN_INVALID_ARGUMENT;
+
map_addr = (vm_map_offset_t)*address;
kr = vm_map_remap(target_map,
&map_addr,
size,
mask,
- anywhere,
+ flags,
src_map,
memory_address,
copy,
if (map == VM_MAP_NULL)
return KERN_INVALID_TASK;
- if (access & ~VM_PROT_ALL)
+ if (access & ~VM_PROT_ALL || (start + size < start))
return KERN_INVALID_ARGUMENT;
if (access != VM_PROT_NONE) {
}
+int
+vm_toggle_entry_reuse(int toggle, int *old_value)
+{
+ vm_map_t map = current_map();
+
+ if(toggle == VM_TOGGLE_GETVALUE && old_value != NULL){
+ *old_value = map->disable_vmentry_reuse;
+ } else if(toggle == VM_TOGGLE_SET){
+ vm_map_lock(map);
+ map->disable_vmentry_reuse = TRUE;
+ if (map->first_free == vm_map_to_entry(map)) {
+ map->highest_entry_end = vm_map_min(map);
+ } else {
+ map->highest_entry_end = map->first_free->vme_end;
+ }
+ vm_map_unlock(map);
+ } else if (toggle == VM_TOGGLE_CLEAR){
+ vm_map_lock(map);
+ map->disable_vmentry_reuse = FALSE;
+ vm_map_unlock(map);
+ } else
+ return KERN_INVALID_ARGUMENT;
+
+ return KERN_SUCCESS;
+}
+
/*
* mach_vm_behavior_set
*
return kr;
}
+kern_return_t
+mach_vm_purgable_control(
+ vm_map_t map,
+ mach_vm_offset_t address,
+ vm_purgable_t control,
+ int *state)
+{
+ if (VM_MAP_NULL == map)
+ return KERN_INVALID_ARGUMENT;
+
+ return vm_map_purgable_control(map,
+ vm_map_trunc_page(address),
+ control,
+ state);
+}
+
kern_return_t
vm_purgable_control(
vm_map_t map,
if (VM_MAP_NULL == map)
return KERN_INVALID_ARGUMENT;
- return vm_map_page_info(map,
- vm_map_trunc_page(offset),
- disposition, ref_count);
+ return vm_map_page_query_internal(map,
+ vm_map_trunc_page(offset),
+ disposition, ref_count);
}
kern_return_t
if (VM_MAP_NULL == map)
return KERN_INVALID_ARGUMENT;
- return vm_map_page_info(map,
- vm_map_trunc_page(offset),
- disposition, ref_count);
+ return vm_map_page_query_internal(map,
+ vm_map_trunc_page(offset),
+ disposition, ref_count);
+}
+
+kern_return_t
+mach_vm_page_info(
+ vm_map_t map,
+ mach_vm_address_t address,
+ vm_page_info_flavor_t flavor,
+ vm_page_info_t info,
+ mach_msg_type_number_t *count)
+{
+ kern_return_t kr;
+
+ if (map == VM_MAP_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ kr = vm_map_page_info(map, address, flavor, info, count);
+ return kr;
}
/* map a (whole) upl into an address space */
vm_upl_map(
vm_map_t map,
upl_t upl,
- vm_offset_t *dst_addr)
+ vm_address_t *dst_addr)
{
vm_map_offset_t map_addr;
kern_return_t kr;
return KERN_INVALID_ARGUMENT;
kr = vm_map_enter_upl(map, upl, &map_addr);
- *dst_addr = CAST_DOWN(vm_offset_t, map_addr);
+ *dst_addr = CAST_DOWN(vm_address_t, map_addr);
return kr;
}
return kr;
}
-
-__private_extern__ kern_return_t
-mach_memory_entry_allocate(
- vm_named_entry_t *user_entry_p,
- ipc_port_t *user_handle_p); /* forward */
-
/*
* mach_make_memory_entry_64
*
boolean_t wired;
vm_object_offset_t obj_off;
vm_prot_t prot;
- vm_map_offset_t lo_offset, hi_offset;
- vm_behavior_t behavior;
+ struct vm_object_fault_info fault_info;
vm_object_t object;
vm_object_t shadow_object;
unsigned int access;
vm_prot_t protections;
+ vm_prot_t original_protections, mask_protections;
unsigned int wimg_mode;
- boolean_t cache_attr = FALSE;
if (((permission & 0x00FF0000) &
~(MAP_MEM_ONLY |
parent_entry = NULL;
}
- protections = permission & VM_PROT_ALL;
+ original_protections = permission & VM_PROT_ALL;
+ protections = original_protections;
+ mask_protections = permission & VM_PROT_IS_MASK;
access = GET_MAP_MEM(permission);
user_handle = IP_NULL;
if(parent_is_object && object != VM_OBJECT_NULL)
wimg_mode = object->wimg_bits;
else
- wimg_mode = VM_WIMG_DEFAULT;
+ wimg_mode = VM_WIMG_USE_DEFAULT;
if((access != GET_MAP_MEM(parent_entry->protection)) &&
!(parent_entry->protection & VM_PROT_WRITE)) {
return KERN_INVALID_RIGHT;
wimg_mode = VM_WIMG_IO;
} else if (access == MAP_MEM_COPYBACK) {
SET_MAP_MEM(access, parent_entry->protection);
- wimg_mode = VM_WIMG_DEFAULT;
+ wimg_mode = VM_WIMG_USE_DEFAULT;
} else if (access == MAP_MEM_WTHRU) {
SET_MAP_MEM(access, parent_entry->protection);
wimg_mode = VM_WIMG_WTHRU;
SET_MAP_MEM(access, parent_entry->protection);
wimg_mode = VM_WIMG_WCOMB;
}
- if(parent_is_object && object &&
+ if (parent_is_object && object &&
(access != MAP_MEM_NOOP) &&
(!(object->nophyscache))) {
- if(object->wimg_bits != wimg_mode) {
- vm_page_t p;
- if ((wimg_mode == VM_WIMG_IO)
- || (wimg_mode == VM_WIMG_WCOMB))
- cache_attr = TRUE;
- else
- cache_attr = FALSE;
- vm_object_lock(object);
- vm_object_paging_wait(object, THREAD_UNINT);
- object->wimg_bits = wimg_mode;
- queue_iterate(&object->memq,
- p, vm_page_t, listq) {
- if (!p->fictitious) {
- pmap_disconnect(p->phys_page);
- if (cache_attr)
- pmap_sync_page_attributes_phys(p->phys_page);
- }
- }
- vm_object_unlock(object);
+
+ if (object->wimg_bits != wimg_mode) {
+ vm_object_lock(object);
+ vm_object_change_wimg_mode(object, wimg_mode);
+ vm_object_unlock(object);
}
}
if (object_handle)
/*
* Force the creation of the VM object now.
*/
- if (map_size > (vm_map_size_t) VM_MAX_ADDRESS) {
+ if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
/*
- * LP64todo - for now, we can only allocate 4GB
+ * LP64todo - for now, we can only allocate 4GB-4096
* internal objects because the default pager can't
* page bigger ones. Remove this when it can.
*/
kr = KERN_INVALID_ARGUMENT;
goto make_mem_done;
}
- object->purgable = VM_OBJECT_PURGABLE_NONVOLATILE;
+ object->purgable = VM_PURGABLE_NONVOLATILE;
}
/*
if (access == MAP_MEM_IO) {
wimg_mode = VM_WIMG_IO;
} else if (access == MAP_MEM_COPYBACK) {
- wimg_mode = VM_WIMG_DEFAULT;
+ wimg_mode = VM_WIMG_USE_DEFAULT;
} else if (access == MAP_MEM_WTHRU) {
wimg_mode = VM_WIMG_WTHRU;
} else if (access == MAP_MEM_WCOMB) {
/* Create a named object based on address range within the task map */
/* Go find the object at given address */
+ if (target_map == VM_MAP_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
redo_lookup:
+ protections = original_protections;
vm_map_lock_read(target_map);
/* get the object associated with the target address */
/* that requested by the caller */
kr = vm_map_lookup_locked(&target_map, map_offset,
- protections, &version,
- &object, &obj_off, &prot, &wired, &behavior,
- &lo_offset, &hi_offset, &real_map);
+ protections | mask_protections,
+ OBJECT_LOCK_EXCLUSIVE, &version,
+ &object, &obj_off, &prot, &wired,
+ &fault_info,
+ &real_map);
if (kr != KERN_SUCCESS) {
vm_map_unlock_read(target_map);
goto make_mem_done;
}
+ if (mask_protections) {
+ /*
+ * The caller asked us to use the "protections" as
+ * a mask, so restrict "protections" to what this
+ * mapping actually allows.
+ */
+ protections &= prot;
+ }
if (((prot & protections) != protections)
|| (object == kernel_object)) {
kr = KERN_INVALID_RIGHT;
/* JMM - The check below should be reworked instead. */
object->true_share = TRUE;
}
+ if (mask_protections) {
+ /*
+ * The caller asked us to use the "protections" as
+ * a mask, so restrict "protections" to what this
+ * mapping actually allows.
+ */
+ protections &= map_entry->max_protection;
+ }
if(((map_entry->max_protection) & protections) != protections) {
kr = KERN_INVALID_RIGHT;
vm_object_unlock(object);
goto make_mem_done;
}
- mappable_size = hi_offset - obj_off;
+ mappable_size = fault_info.hi_offset - obj_off;
total_size = map_entry->vme_end - map_entry->vme_start;
if(map_size > mappable_size) {
/* try to extend mappable size if the entries */
next_entry->vme_prev->offset +
(next_entry->vme_prev->vme_end -
next_entry->vme_prev->vme_start))) {
+ if (mask_protections) {
+ /*
+ * The caller asked us to use
+ * the "protections" as a mask,
+ * so restrict "protections" to
+ * what this mapping actually
+ * allows.
+ */
+ protections &= next_entry->max_protection;
+ }
if(((next_entry->max_protection)
& protections) != protections) {
break;
/* under us. */
if ((map_entry->needs_copy || object->shadowed ||
- (object->size > total_size))
+ (object->vo_size > total_size))
&& !object->true_share) {
/*
* We have to unlock the VM object before
/* create a shadow object */
vm_object_shadow(&map_entry->object.vm_object,
- &map_entry->offset, total_size);
+ &map_entry->offset, total_size);
shadow_object = map_entry->object.vm_object;
vm_object_unlock(object);
prot = map_entry->protection & ~VM_PROT_WRITE;
-#ifdef STACK_ONLY_NX
- if (map_entry->alias != VM_MEMORY_STACK && prot)
+
+ if (override_nx(target_map, map_entry->alias) && prot)
prot |= VM_PROT_EXECUTE;
-#endif
+
vm_object_pmap_protect(
object, map_entry->offset,
total_size,
- map_entry->vme_start);
next_entry = map_entry->vme_next;
map_entry->needs_copy = FALSE;
+
+ vm_object_lock(shadow_object);
while (total_size) {
if(next_entry->object.vm_object == object) {
- shadow_object->ref_count++;
- vm_object_res_reference(shadow_object);
+ vm_object_reference_locked(shadow_object);
next_entry->object.vm_object
= shadow_object;
vm_object_deallocate(object);
+ map_entry->offset;
vm_map_lock_write_to_read(target_map);
- vm_object_lock(object);
-
}
}
if(real_map != target_map)
vm_map_unlock_read(real_map);
- if(object->wimg_bits != wimg_mode) {
- vm_page_t p;
-
- vm_object_paging_wait(object, THREAD_UNINT);
-
- if ((wimg_mode == VM_WIMG_IO)
- || (wimg_mode == VM_WIMG_WCOMB))
- cache_attr = TRUE;
- else
- cache_attr = FALSE;
-
- queue_iterate(&object->memq,
- p, vm_page_t, listq) {
- if (!p->fictitious) {
- pmap_disconnect(p->phys_page);
- if (cache_attr)
- pmap_sync_page_attributes_phys(p->phys_page);
- }
- }
- object->wimg_bits = wimg_mode;
- }
+ if (object->wimg_bits != wimg_mode)
+ vm_object_change_wimg_mode(object, wimg_mode);
/* the size of mapped entry that overlaps with our region */
/* which is targeted for share. */
user_entry->is_sub_map = FALSE;
user_entry->is_pager = FALSE;
user_entry->offset = obj_off;
- user_entry->protection = permission;
+ user_entry->protection = protections;
+ SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
user_entry->size = map_size;
/* user_object pager and internal fields are not used */
goto make_mem_done;
}
+ if (mask_protections) {
+ /*
+ * The caller asked us to use the "protections" as
+ * a mask, so restrict "protections" to what this
+ * mapping actually allows.
+ */
+ protections &= parent_entry->protection;
+ }
if((protections & parent_entry->protection) != protections) {
kr = KERN_PROTECTION_FAILURE;
goto make_mem_done;
make_mem_done:
if (user_handle != IP_NULL) {
- ipc_port_dealloc_kernel(user_handle);
- }
- if (user_entry != NULL) {
- kfree(user_entry, sizeof *user_entry);
+ /*
+ * Releasing "user_handle" causes the kernel object
+ * associated with it ("user_entry" here) to also be
+ * released and freed.
+ */
+ mach_memory_entry_port_release(user_handle);
}
return kr;
}
ipc_port_t *object_handle,
ipc_port_t parent_entry)
{
- memory_object_offset_t mo_size;
+ memory_object_size_t mo_size;
kern_return_t kr;
- mo_size = (memory_object_offset_t)*size;
+ mo_size = (memory_object_size_t)*size;
kr = mach_make_memory_entry_64(target_map, &mo_size,
(memory_object_offset_t)offset, permission, object_handle,
parent_entry);
ipc_port_t *object_handle,
ipc_port_t parent_entry)
{
- memory_object_offset_t mo_size;
+ memory_object_size_t mo_size;
kern_return_t kr;
- mo_size = (memory_object_offset_t)*size;
+ mo_size = (memory_object_size_t)*size;
kr = mach_make_memory_entry_64(target_map, &mo_size,
(memory_object_offset_t)offset, permission, object_handle,
parent_entry);
user_entry->backing.pager = NULL;
user_entry->is_sub_map = FALSE;
user_entry->is_pager = FALSE;
- user_entry->size = 0;
user_entry->internal = FALSE;
+ user_entry->size = 0;
+ user_entry->offset = 0;
+ user_entry->protection = VM_PROT_NONE;
user_entry->ref_count = 1;
ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
return KERN_INVALID_ARGUMENT;
}
+ if (control != VM_PURGABLE_SET_STATE &&
+ control != VM_PURGABLE_GET_STATE)
+ return(KERN_INVALID_ARGUMENT);
+
+ if (control == VM_PURGABLE_SET_STATE &&
+ (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
+ ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK)))
+ return(KERN_INVALID_ARGUMENT);
mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
vm_object_lock(object);
/* check that named entry covers entire object ? */
- if (mem_entry->offset != 0 || object->size != mem_entry->size) {
+ if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
vm_object_unlock(object);
named_entry_unlock(mem_entry);
return KERN_INVALID_ARGUMENT;
assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
named_entry = (vm_named_entry_t)port->ip_kobject;
- mutex_lock(&(named_entry)->Lock);
+ lck_mtx_lock(&(named_entry)->Lock);
named_entry->ref_count -= 1;
if(named_entry->ref_count == 0) {
if (named_entry->is_sub_map) {
vm_object_deallocate(named_entry->backing.object);
} /* else JMM - need to drop reference on pager in that case */
- mutex_unlock(&(named_entry)->Lock);
+ lck_mtx_unlock(&(named_entry)->Lock);
kfree((void *) port->ip_kobject,
sizeof (struct vm_named_entry));
} else
- mutex_unlock(&(named_entry)->Lock);
+ lck_mtx_unlock(&(named_entry)->Lock);
}
/* Allow manipulation of individual page state. This is actually part of */
offset_beg,
offset_end,
ops,
- range);
+ (uint32_t *) range);
vm_object_deallocate(object);
if (flags & UPL_COMMIT_FREE_ON_EMPTY)
flags |= UPL_COMMIT_NOTIFY_EMPTY;
+ if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
/* If they are not present in the object they will */
/* have to be picked up from the pager through the */
/* fault mechanism. */
- if(entry->object.vm_object->shadow_offset == 0) {
+ if(entry->object.vm_object->vo_shadow_offset == 0) {
/* need to call vm_fault */
vm_map_unlock(map);
vm_fault(map, map_offset, VM_PROT_NONE,
}
offset = entry->offset + (map_offset - entry->vme_start);
phys_page = (ppnum_t)
- ((entry->object.vm_object->shadow_offset
+ ((entry->object.vm_object->vo_shadow_offset
+ offset) >> 12);
break;
vm_object_t old_object;
vm_object_lock(object->shadow);
old_object = object;
- offset = offset + object->shadow_offset;
+ offset = offset + object->vo_shadow_offset;
object = object->shadow;
vm_object_unlock(old_object);
} else {
kern_return_t kernel_object_iopl_request( /* forward */
vm_named_entry_t named_entry,
memory_object_offset_t offset,
- vm_size_t *upl_size,
+ upl_size_t *upl_size,
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
kernel_object_iopl_request(
vm_named_entry_t named_entry,
memory_object_offset_t offset,
- vm_size_t *upl_size,
+ upl_size_t *upl_size,
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
if(*upl_size == 0) {
if(offset >= named_entry->size)
return(KERN_INVALID_RIGHT);
- *upl_size = named_entry->size - offset;
+ *upl_size = (upl_size_t) (named_entry->size - offset);
+ if (*upl_size != named_entry->size - offset)
+ return KERN_INVALID_ARGUMENT;
}
if(caller_flags & UPL_COPYOUT_FROM) {
if((named_entry->protection & VM_PROT_READ)