#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>
+#include <mach/sdt.h>
#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
+#include <vm/vm_init.h>
+
+#include <san/kasan.h>
vm_size_t upl_offset_to_pagelist = 0;
#include <vm/cpm.h>
#endif /* VM_CPM */
-ipc_port_t dynamic_pager_control_port=NULL;
-
/*
 * mach_vm_allocate allocates "zero fill" memory in the specified
* map.
*/
kern_return_t
-mach_vm_allocate(
+mach_vm_allocate_external(
vm_map_t map,
mach_vm_offset_t *addr,
mach_vm_size_t size,
int flags)
+{
+ vm_tag_t tag;
+
+ VM_GET_FLAGS_ALIAS(flags, tag);
+ return (mach_vm_allocate_kernel(map, addr, size, flags, tag));
+}
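+
+/*
+ * Note on the _external/_kernel split used throughout this file: the
+ * _external functions are the user-facing entry points; they pull the VM
+ * tag out of the alias bits of the caller-supplied flags with
+ * VM_GET_FLAGS_ALIAS() and forward to the _kernel variants, which take the
+ * vm_tag_t explicitly so in-kernel callers can attribute their allocations.
+ */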
+
+kern_return_t
+mach_vm_allocate_kernel(
+ vm_map_t map,
+ mach_vm_offset_t *addr,
+ mach_vm_size_t size,
+ int flags,
+ vm_tag_t tag)
{
vm_map_offset_t map_addr;
vm_map_size_t map_size;
map_size,
(vm_map_offset_t)0,
flags,
+ VM_MAP_KERNEL_FLAGS_NONE,
+ tag,
VM_OBJECT_NULL,
(vm_object_offset_t)0,
FALSE,
* map (which is limited to the same size as the kernel).
*/
kern_return_t
-vm_allocate(
+vm_allocate_external(
vm_map_t map,
vm_offset_t *addr,
vm_size_t size,
int flags)
+{
+ vm_tag_t tag;
+
+ VM_GET_FLAGS_ALIAS(flags, tag);
+ return (vm_allocate_kernel(map, addr, size, flags, tag));
+}
+
+kern_return_t
+vm_allocate_kernel(
+ vm_map_t map,
+ vm_offset_t *addr,
+ vm_size_t size,
+ int flags,
+ vm_tag_t tag)
{
vm_map_offset_t map_addr;
vm_map_size_t map_size;
map_size,
(vm_map_offset_t)0,
flags,
+ VM_MAP_KERNEL_FLAGS_NONE,
+ tag,
VM_OBJECT_NULL,
(vm_object_offset_t)0,
FALSE,
VM_PROT_ALL,
VM_INHERIT_DEFAULT);
+#if KASAN
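+	/* notify KASan of the newly usable kernel VA range */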
+ if (result == KERN_SUCCESS && map->pmap == kernel_pmap) {
+ kasan_notify_address(map_addr, map_size);
+ }
+#endif
+
*addr = CAST_DOWN(vm_offset_t, map_addr);
return(result);
}
*/
kern_return_t
vm_deallocate(
- register vm_map_t map,
+ vm_map_t map,
vm_offset_t start,
vm_size_t size)
{
*/
kern_return_t
vm_inherit(
- register vm_map_t map,
+ vm_map_t map,
vm_offset_t start,
vm_size_t size,
vm_inherit_t new_inheritance)
*
*/
kern_return_t
-mach_vm_map(
+mach_vm_map_external(
+ vm_map_t target_map,
+ mach_vm_offset_t *address,
+ mach_vm_size_t initial_size,
+ mach_vm_offset_t mask,
+ int flags,
+ ipc_port_t port,
+ vm_object_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
+{
+ vm_tag_t tag;
+
+ VM_GET_FLAGS_ALIAS(flags, tag);
+ return (mach_vm_map_kernel(target_map, address, initial_size, mask, flags, tag, port,
+ offset, copy, cur_protection, max_protection, inheritance));
+}
+
+kern_return_t
+mach_vm_map_kernel(
vm_map_t target_map,
mach_vm_offset_t *address,
mach_vm_size_t initial_size,
mach_vm_offset_t mask,
int flags,
+ vm_tag_t tag,
ipc_port_t port,
vm_object_offset_t offset,
boolean_t copy,
return KERN_INVALID_ARGUMENT;
kr = vm_map_enter_mem_object(target_map,
- &vmmaddr,
- initial_size,
- mask,
- flags,
- port,
- offset,
- copy,
- cur_protection,
- max_protection,
- inheritance);
+ &vmmaddr,
+ initial_size,
+ mask,
+ flags,
+ VM_MAP_KERNEL_FLAGS_NONE,
+ tag,
+ port,
+ offset,
+ copy,
+ cur_protection,
+ max_protection,
+ inheritance);
+
+#if KASAN
+ if (kr == KERN_SUCCESS && target_map->pmap == kernel_pmap) {
+ kasan_notify_address(vmmaddr, initial_size);
+ }
+#endif
*address = vmmaddr;
return kr;
/* legacy interface */
kern_return_t
-vm_map_64(
+vm_map_64_external(
+ vm_map_t target_map,
+ vm_offset_t *address,
+ vm_size_t size,
+ vm_offset_t mask,
+ int flags,
+ ipc_port_t port,
+ vm_object_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
+{
+ vm_tag_t tag;
+
+ VM_GET_FLAGS_ALIAS(flags, tag);
+ return (vm_map_64_kernel(target_map, address, size, mask, flags, tag, port, offset,
+ copy, cur_protection, max_protection, inheritance));
+}
+
+kern_return_t
+vm_map_64_kernel(
vm_map_t target_map,
vm_offset_t *address,
vm_size_t size,
vm_offset_t mask,
int flags,
+ vm_tag_t tag,
ipc_port_t port,
vm_object_offset_t offset,
boolean_t copy,
map_size = (mach_vm_size_t)size;
map_mask = (mach_vm_offset_t)mask;
- kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
+ kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask, flags, tag,
port, offset, copy,
cur_protection, max_protection, inheritance);
*address = CAST_DOWN(vm_offset_t, map_addr);
/* temporary, until world build */
kern_return_t
-vm_map(
+vm_map_external(
+ vm_map_t target_map,
+ vm_offset_t *address,
+ vm_size_t size,
+ vm_offset_t mask,
+ int flags,
+ ipc_port_t port,
+ vm_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
+{
+ vm_tag_t tag;
+
+ VM_GET_FLAGS_ALIAS(flags, tag);
+ return (vm_map_kernel(target_map, address, size, mask, flags, tag, port, offset, copy, cur_protection, max_protection, inheritance));
+}
+
+kern_return_t
+vm_map_kernel(
vm_map_t target_map,
vm_offset_t *address,
vm_size_t size,
vm_offset_t mask,
int flags,
+ vm_tag_t tag,
ipc_port_t port,
vm_offset_t offset,
boolean_t copy,
map_mask = (mach_vm_offset_t)mask;
obj_offset = (vm_object_offset_t)offset;
- kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
+ kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask, flags, tag,
port, obj_offset, copy,
cur_protection, max_protection, inheritance);
*address = CAST_DOWN(vm_offset_t, map_addr);
* over top of itself (with altered permissions and/or
* as an in-place copy of itself).
*/
+kern_return_t
+mach_vm_remap_external(
+ vm_map_t target_map,
+ mach_vm_offset_t *address,
+ mach_vm_size_t size,
+ mach_vm_offset_t mask,
+ int flags,
+ vm_map_t src_map,
+ mach_vm_offset_t memory_address,
+ boolean_t copy,
+ vm_prot_t *cur_protection,
+ vm_prot_t *max_protection,
+ vm_inherit_t inheritance)
+{
+ vm_tag_t tag;
+ VM_GET_FLAGS_ALIAS(flags, tag);
+
+ return (mach_vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map, memory_address,
+ copy, cur_protection, max_protection, inheritance));
+}
kern_return_t
-mach_vm_remap(
+mach_vm_remap_kernel(
vm_map_t target_map,
mach_vm_offset_t *address,
mach_vm_size_t size,
mach_vm_offset_t mask,
int flags,
+ vm_tag_t tag,
vm_map_t src_map,
mach_vm_offset_t memory_address,
boolean_t copy,
size,
mask,
flags,
+ VM_MAP_KERNEL_FLAGS_NONE,
+ tag,
src_map,
memory_address,
copy,
* kernel context).
*/
kern_return_t
-vm_remap(
+vm_remap_external(
+ vm_map_t target_map,
+ vm_offset_t *address,
+ vm_size_t size,
+ vm_offset_t mask,
+ int flags,
+ vm_map_t src_map,
+ vm_offset_t memory_address,
+ boolean_t copy,
+ vm_prot_t *cur_protection,
+ vm_prot_t *max_protection,
+ vm_inherit_t inheritance)
+{
+ vm_tag_t tag;
+ VM_GET_FLAGS_ALIAS(flags, tag);
+
+ return (vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map,
+ memory_address, copy, cur_protection, max_protection, inheritance));
+}
+
+kern_return_t
+vm_remap_kernel(
vm_map_t target_map,
vm_offset_t *address,
vm_size_t size,
vm_offset_t mask,
int flags,
+ vm_tag_t tag,
vm_map_t src_map,
vm_offset_t memory_address,
boolean_t copy,
size,
mask,
flags,
+ VM_MAP_KERNEL_FLAGS_NONE,
+ tag,
src_map,
memory_address,
copy,
* [ To unwire the pages, specify VM_PROT_NONE. ]
*/
kern_return_t
-mach_vm_wire(
+mach_vm_wire_external(
host_priv_t host_priv,
vm_map_t map,
mach_vm_offset_t start,
mach_vm_size_t size,
vm_prot_t access)
+{
+ return (mach_vm_wire_kernel(host_priv, map, start, size, access, VM_KERN_MEMORY_MLOCK));
+}
+
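+/*
+ * mach_vm_wire_kernel: same as mach_vm_wire_external() above, except that
+ * the caller supplies the vm_tag_t under which the wired memory is
+ * accounted; the user entry point uses VM_KERN_MEMORY_MLOCK.
+ */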
+kern_return_t
+mach_vm_wire_kernel(
+ host_priv_t host_priv,
+ vm_map_t map,
+ mach_vm_offset_t start,
+ mach_vm_size_t size,
+ vm_prot_t access,
+ vm_tag_t tag)
{
kern_return_t rc;
return KERN_INVALID_ARGUMENT;
if (access != VM_PROT_NONE) {
- rc = vm_map_wire(map,
+ rc = vm_map_wire_kernel(map,
vm_map_trunc_page(start,
VM_MAP_PAGE_MASK(map)),
vm_map_round_page(start+size,
VM_MAP_PAGE_MASK(map)),
- access,
+ access, tag,
TRUE);
} else {
rc = vm_map_unwire(map,
kern_return_t
vm_wire(
host_priv_t host_priv,
- register vm_map_t map,
+ vm_map_t map,
vm_offset_t start,
vm_size_t size,
vm_prot_t access)
if (size == 0) {
rc = KERN_SUCCESS;
} else if (access != VM_PROT_NONE) {
- rc = vm_map_wire(map,
+ rc = vm_map_wire_kernel(map,
vm_map_trunc_page(start,
VM_MAP_PAGE_MASK(map)),
vm_map_round_page(start+size,
VM_MAP_PAGE_MASK(map)),
- access,
+ access, VM_KERN_MEMORY_OSFMK,
TRUE);
} else {
rc = vm_map_unwire(map,
{
vm_map_t map = current_map();
+ assert(!map->is_nested_map);
if(toggle == VM_TOGGLE_GETVALUE && old_value != NULL){
*old_value = map->disable_vmentry_reuse;
} else if(toggle == VM_TOGGLE_SET){
+ vm_map_entry_t map_to_entry;
+
vm_map_lock(map);
+ vm_map_disable_hole_optimization(map);
map->disable_vmentry_reuse = TRUE;
- if (map->first_free == vm_map_to_entry(map)) {
+ __IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
+ if (map->first_free == map_to_entry) {
map->highest_entry_end = vm_map_min(map);
} else {
map->highest_entry_end = map->first_free->vme_end;
mach_vm_behavior_set(
vm_map_t map,
mach_vm_offset_t start,
- mach_vm_size_t size,
+ mach_vm_size_t size,
vm_behavior_t new_behavior)
{
+ vm_map_offset_t align_mask;
+
if ((map == VM_MAP_NULL) || (start + size < start))
return(KERN_INVALID_ARGUMENT);
if (size == 0)
return KERN_SUCCESS;
- return(vm_map_behavior_set(map,
- vm_map_trunc_page(start,
- VM_MAP_PAGE_MASK(map)),
- vm_map_round_page(start+size,
- VM_MAP_PAGE_MASK(map)),
- new_behavior));
+ switch (new_behavior) {
+ case VM_BEHAVIOR_REUSABLE:
+ case VM_BEHAVIOR_REUSE:
+ case VM_BEHAVIOR_CAN_REUSE:
+ /*
+ * Align to the hardware page size, to allow
+ * malloc() to maximize the amount of re-usability,
+ * even on systems with larger software page size.
+ */
+ align_mask = PAGE_MASK;
+ break;
+ default:
+ align_mask = VM_MAP_PAGE_MASK(map);
+ break;
+ }
+
+ return vm_map_behavior_set(map,
+ vm_map_trunc_page(start, align_mask),
+ vm_map_round_page(start+size, align_mask),
+ new_behavior);
}
/*
vm_size_t size,
vm_behavior_t new_behavior)
{
- if ((map == VM_MAP_NULL) || (start + size < start))
- return(KERN_INVALID_ARGUMENT);
-
- if (size == 0)
- return KERN_SUCCESS;
+ if (start + size < start)
+ return KERN_INVALID_ARGUMENT;
- return(vm_map_behavior_set(map,
- vm_map_trunc_page(start,
- VM_MAP_PAGE_MASK(map)),
- vm_map_round_page(start+size,
- VM_MAP_PAGE_MASK(map)),
- new_behavior));
+ return mach_vm_behavior_set(map,
+ (mach_vm_offset_t) start,
+ (mach_vm_size_t) size,
+ new_behavior);
}
/*
if (VM_MAP_NULL == map)
return KERN_INVALID_ARGUMENT;
+ if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
+ /* not allowed from user-space */
+ return KERN_INVALID_ARGUMENT;
+ }
+
return vm_map_purgable_control(map,
vm_map_trunc_page(address, PAGE_MASK),
control,
if (VM_MAP_NULL == map)
return KERN_INVALID_ARGUMENT;
+ if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
+ /* not allowed from user-space */
+ return KERN_INVALID_ARGUMENT;
+ }
+
return vm_map_purgable_control(map,
vm_map_trunc_page(address, PAGE_MASK),
control,
disposition, ref_count);
}
+kern_return_t
+mach_vm_page_range_query(
+ vm_map_t map,
+ mach_vm_offset_t address,
+ mach_vm_size_t size,
+ mach_vm_address_t dispositions_addr,
+ mach_vm_size_t *dispositions_count)
+{
+ kern_return_t kr = KERN_SUCCESS;
+ int num_pages = 0, i = 0;
+ mach_vm_size_t curr_sz = 0, copy_sz = 0;
+ mach_vm_size_t disp_buf_req_size = 0, disp_buf_total_size = 0;
+ mach_msg_type_number_t count = 0;
+
+ void *info = NULL;
+	void *local_disp = NULL;
+ vm_map_size_t info_size = 0, local_disp_size = 0;
+ mach_vm_offset_t start = 0, end = 0;
+
+ if (map == VM_MAP_NULL || dispositions_count == NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+	disp_buf_req_size = (*dispositions_count * sizeof(int));
+ start = mach_vm_trunc_page(address);
+ end = mach_vm_round_page(address + size);
+
+ if (end < start) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (disp_buf_req_size == 0 || (end == start)) {
+ return KERN_SUCCESS;
+ }
+
+ /*
+	 * For large requests, we will go through them one
+	 * MAX_PAGE_RANGE_QUERY-sized chunk at a time.
+ */
+
+ curr_sz = MIN(end - start, MAX_PAGE_RANGE_QUERY);
+ num_pages = (int) (curr_sz >> PAGE_SHIFT);
+
+ info_size = num_pages * sizeof(vm_page_info_basic_data_t);
+ info = kalloc(info_size);
+
+ if (info == NULL) {
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ local_disp_size = num_pages * sizeof(int);
+ local_disp = kalloc(local_disp_size);
+
+ if (local_disp == NULL) {
+
+ kfree(info, info_size);
+ info = NULL;
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
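+	/*
+	 * For each chunk: gather basic page info, keep just the per-page
+	 * disposition word, and copy it out to the caller's buffer, until
+	 * either the buffer or the range is exhausted.
+	 */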
+ while (size) {
+
+ count = VM_PAGE_INFO_BASIC_COUNT;
+ kr = vm_map_page_range_info_internal(
+ map,
+ start,
+ mach_vm_round_page(start + curr_sz),
+ VM_PAGE_INFO_BASIC,
+ (vm_page_info_t) info,
+ &count);
+
+ assert(kr == KERN_SUCCESS);
+
+ for (i = 0; i < num_pages; i++) {
+
+ ((int*)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition;
+ }
+
+ copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int)/* an int per page */);
+ kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz);
+
+ start += curr_sz;
+ disp_buf_req_size -= copy_sz;
+ disp_buf_total_size += copy_sz;
+
+ if (kr != 0) {
+ break;
+ }
+
+ if ((disp_buf_req_size == 0) || (curr_sz >= size)) {
+
+ /*
+			 * We might have inspected the full range, or
+			 * more than it, especially if the user passed in
+			 * a non-page-aligned start/size and/or if we
+			 * descended into a submap. We are done here.
+ */
+
+ size = 0;
+
+ } else {
+
+ dispositions_addr += copy_sz;
+
+ size -= curr_sz;
+
+ curr_sz = MIN(mach_vm_round_page(size), MAX_PAGE_RANGE_QUERY);
+ num_pages = (int)(curr_sz >> PAGE_SHIFT);
+ }
+ }
+
+ *dispositions_count = disp_buf_total_size / sizeof(int);
+
+ kfree(local_disp, local_disp_size);
+ local_disp = NULL;
+
+ kfree(info, info_size);
+ info = NULL;
+
+ return kr;
+}
+
kern_return_t
mach_vm_page_info(
vm_map_t map,
upl_t *upl,
upl_page_info_array_t page_list,
unsigned int *count,
- int *flags,
+ upl_control_flags_t *flags,
+ vm_tag_t tag,
int force_data_sync)
{
- int map_flags;
- kern_return_t kr;
+ upl_control_flags_t map_flags;
+ kern_return_t kr;
if (VM_MAP_NULL == map)
return KERN_INVALID_ARGUMENT;
upl,
page_list,
count,
- &map_flags);
+ &map_flags,
+ tag);
*flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
return kr;
}
+#if CONFIG_EMBEDDED
+extern int proc_selfpid(void);
+extern char *proc_name_address(void *p);
+int cs_executable_mem_entry = 0;
+int log_executable_mem_entry = 0;
+#endif /* CONFIG_EMBEDDED */
+
/*
* mach_make_memory_entry_64
*
* somewhere else. Rather than doing it all at once (and
* without needing access to the other whole map).
*/
-
kern_return_t
mach_make_memory_entry_64(
vm_map_t target_map,
vm_prot_t permission,
ipc_port_t *object_handle,
ipc_port_t parent_handle)
+{
+ if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_USER) {
+ /*
+ * Unknown flag: reject for forward compatibility.
+ */
+ return KERN_INVALID_VALUE;
+ }
+
+ return mach_make_memory_entry_internal(target_map,
+ size,
+ offset,
+ permission,
+ object_handle,
+ parent_handle);
+}
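+
+/*
+ * The user-visible mach_make_memory_entry_64() above only accepts the
+ * MAP_MEM_FLAGS_USER subset of flags; kernel callers use
+ * mach_make_memory_entry_internal() below, which allows the full
+ * MAP_MEM_FLAGS_ALL set.
+ */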
+
+extern int pacified_purgeable_iokit;
+
+kern_return_t
+mach_make_memory_entry_internal(
+ vm_map_t target_map,
+ memory_object_size_t *size,
+ memory_object_offset_t offset,
+ vm_prot_t permission,
+ ipc_port_t *object_handle,
+ ipc_port_t parent_handle)
{
vm_map_version_t version;
vm_named_entry_t parent_entry;
/* needed for call to vm_map_lookup_locked */
boolean_t wired;
+ boolean_t iskernel;
vm_object_offset_t obj_off;
vm_prot_t prot;
struct vm_object_fault_info fault_info;
vm_map_entry_t next_entry;
vm_map_t local_map;
vm_map_t original_map = target_map;
- vm_map_size_t total_size;
- vm_map_size_t map_size;
- vm_map_offset_t map_offset;
+ vm_map_size_t total_size, map_size;
+ vm_map_offset_t map_start, map_end;
vm_map_offset_t local_offset;
vm_object_size_t mappable_size;
boolean_t force_shadow = FALSE;
boolean_t use_data_addr;
+ boolean_t use_4K_compat;
- if (((permission & 0x00FF0000) &
- ~(MAP_MEM_ONLY |
- MAP_MEM_NAMED_CREATE |
- MAP_MEM_PURGABLE |
- MAP_MEM_NAMED_REUSE |
- MAP_MEM_USE_DATA_ADDR |
- MAP_MEM_VM_COPY |
- MAP_MEM_VM_SHARE))) {
+ if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_ALL) {
/*
* Unknown flag: reject for forward compatibility.
*/
mask_protections = permission & VM_PROT_IS_MASK;
access = GET_MAP_MEM(permission);
use_data_addr = ((permission & MAP_MEM_USE_DATA_ADDR) != 0);
+ use_4K_compat = ((permission & MAP_MEM_4K_DATA_ADDR) != 0);
user_handle = IP_NULL;
user_entry = NULL;
- map_offset = vm_map_trunc_page(offset, PAGE_MASK);
+ map_start = vm_map_trunc_page(offset, PAGE_MASK);
if (permission & MAP_MEM_ONLY) {
boolean_t parent_is_object;
- map_size = vm_map_round_page(*size, PAGE_MASK);
+ map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+ map_size = map_end - map_start;
- if (use_data_addr || parent_entry == NULL) {
+ if (use_data_addr || use_4K_compat || parent_entry == NULL) {
return KERN_INVALID_ARGUMENT;
}
- parent_is_object = !(parent_entry->is_sub_map ||
- parent_entry->is_pager);
+ parent_is_object = !parent_entry->is_sub_map;
object = parent_entry->backing.object;
if(parent_is_object && object != VM_OBJECT_NULL)
wimg_mode = object->wimg_bits;
!(parent_entry->protection & VM_PROT_WRITE)) {
return KERN_INVALID_RIGHT;
}
- if(access == MAP_MEM_IO) {
- SET_MAP_MEM(access, parent_entry->protection);
- wimg_mode = VM_WIMG_IO;
- } else if (access == MAP_MEM_COPYBACK) {
- SET_MAP_MEM(access, parent_entry->protection);
- wimg_mode = VM_WIMG_USE_DEFAULT;
- } else if (access == MAP_MEM_INNERWBACK) {
- SET_MAP_MEM(access, parent_entry->protection);
- wimg_mode = VM_WIMG_INNERWBACK;
- } else if (access == MAP_MEM_WTHRU) {
- SET_MAP_MEM(access, parent_entry->protection);
- wimg_mode = VM_WIMG_WTHRU;
- } else if (access == MAP_MEM_WCOMB) {
- SET_MAP_MEM(access, parent_entry->protection);
- wimg_mode = VM_WIMG_WCOMB;
- }
+ vm_prot_to_wimg(access, &wimg_mode);
+ if (access != MAP_MEM_NOOP)
+ SET_MAP_MEM(access, parent_entry->protection);
if (parent_is_object && object &&
(access != MAP_MEM_NOOP) &&
(!(object->nophyscache))) {
*object_handle = IP_NULL;
return KERN_SUCCESS;
} else if (permission & MAP_MEM_NAMED_CREATE) {
- map_size = vm_map_round_page(*size, PAGE_MASK);
+ map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+ map_size = map_end - map_start;
- if (use_data_addr) {
+ if (use_data_addr || use_4K_compat) {
return KERN_INVALID_ARGUMENT;
}
goto make_mem_done;
}
object->purgable = VM_PURGABLE_NONVOLATILE;
+ if (permission & MAP_MEM_PURGABLE_KERNEL_ONLY) {
+ object->purgeable_only_by_kernel = TRUE;
+ }
assert(object->vo_purgeable_owner == NULL);
assert(object->resident_page_count == 0);
assert(object->wired_page_count == 0);
vm_object_lock(object);
- vm_purgeable_nonvolatile_enqueue(object,
- current_task());
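+		/*
+		 * Pick the task that will own (and be charged for) this
+		 * purgeable object: kernel-only objects, and network-ledger
+		 * objects when pacified_purgeable_iokit is set, are owned by
+		 * the kernel_task; everything else by the calling task.
+		 */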
+ if (pacified_purgeable_iokit) {
+ if (permission & MAP_MEM_LEDGER_TAG_NETWORK) {
+ vm_purgeable_nonvolatile_enqueue(object,
+ kernel_task);
+ } else {
+ vm_purgeable_nonvolatile_enqueue(object,
+ current_task());
+ }
+ } else {
+ if (object->purgeable_only_by_kernel) {
+ vm_purgeable_nonvolatile_enqueue(object,
+ kernel_task);
+ } else {
+ vm_purgeable_nonvolatile_enqueue(object,
+ current_task());
+ }
+ }
vm_object_unlock(object);
}
+#if CONFIG_SECLUDED_MEMORY
+ if (secluded_for_iokit && /* global boot-arg */
+ ((permission & MAP_MEM_GRAB_SECLUDED)
+#if 11
+ /* XXX FBDP for my testing only */
+ || (secluded_for_fbdp && map_size == 97550336)
+#endif
+ )) {
+#if 11
+ if (!(permission & MAP_MEM_GRAB_SECLUDED) &&
+ secluded_for_fbdp) {
+ printf("FBDP: object %p size %lld can grab secluded\n", object, (uint64_t) map_size);
+ }
+#endif
+ object->can_grab_secluded = TRUE;
+ assert(!object->eligible_for_secluded);
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
/*
* The VM object is brand new and nobody else knows about it,
* so we don't need to lock it.
*/
wimg_mode = object->wimg_bits;
- if (access == MAP_MEM_IO) {
- wimg_mode = VM_WIMG_IO;
- } else if (access == MAP_MEM_COPYBACK) {
- wimg_mode = VM_WIMG_USE_DEFAULT;
- } else if (access == MAP_MEM_INNERWBACK) {
- wimg_mode = VM_WIMG_INNERWBACK;
- } else if (access == MAP_MEM_WTHRU) {
- wimg_mode = VM_WIMG_WTHRU;
- } else if (access == MAP_MEM_WCOMB) {
- wimg_mode = VM_WIMG_WCOMB;
- }
- if (access != MAP_MEM_NOOP) {
- object->wimg_bits = wimg_mode;
- }
+ vm_prot_to_wimg(access, &wimg_mode);
+ if (access != MAP_MEM_NOOP) {
+ object->wimg_bits = wimg_mode;
+ }
+
/* the object has no pages, so no WIMG bits to update here */
/*
user_entry->backing.object = object;
user_entry->internal = TRUE;
user_entry->is_sub_map = FALSE;
- user_entry->is_pager = FALSE;
user_entry->offset = 0;
user_entry->data_offset = 0;
user_entry->protection = protections;
/* user_object pager and internal fields are not used */
/* when the object field is filled in. */
- *size = CAST_DOWN(vm_size_t, map_size);
+ *size = CAST_DOWN(vm_size_t, (user_entry->size -
+ user_entry->data_offset));
*object_handle = user_handle;
return KERN_SUCCESS;
}
return KERN_INVALID_TASK;
}
- if (use_data_addr) {
- map_size = (vm_map_round_page(offset + *size,
- PAGE_MASK) -
- map_offset);
- offset_in_page = offset - map_offset;
+ map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+ map_size = map_end - map_start;
+ if (use_data_addr || use_4K_compat) {
+ offset_in_page = offset - map_start;
+ if (use_4K_compat)
+ offset_in_page &= ~((signed)(0xFFF));
} else {
- map_size = vm_map_round_page(*size, PAGE_MASK);
offset_in_page = 0;
}
- kr = vm_map_copyin(target_map,
- map_offset,
- map_size,
- FALSE,
- ©);
+ kr = vm_map_copyin_internal(target_map,
+ map_start,
+ map_size,
+ VM_MAP_COPYIN_ENTRY_LIST,
+ ©);
if (kr != KERN_SUCCESS) {
return kr;
}
user_entry->backing.copy = copy;
user_entry->internal = FALSE;
user_entry->is_sub_map = FALSE;
- user_entry->is_pager = FALSE;
user_entry->is_copy = TRUE;
user_entry->offset = 0;
user_entry->protection = protections;
user_entry->size = map_size;
user_entry->data_offset = offset_in_page;
- *size = CAST_DOWN(vm_size_t, map_size);
+ *size = CAST_DOWN(vm_size_t, (user_entry->size -
+ user_entry->data_offset));
*object_handle = user_handle;
return KERN_SUCCESS;
}
return KERN_INVALID_TASK;
}
- if (use_data_addr) {
- map_size = (vm_map_round_page(offset + *size,
- PAGE_MASK) -
- map_offset);
- offset_in_page = offset - map_offset;
+ map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+ map_size = map_end - map_start;
+ if (use_data_addr || use_4K_compat) {
+ offset_in_page = offset - map_start;
+ if (use_4K_compat)
+ offset_in_page &= ~((signed)(0xFFF));
} else {
- map_size = vm_map_round_page(*size, PAGE_MASK);
offset_in_page = 0;
}
+ cur_prot = VM_PROT_ALL;
kr = vm_map_copy_extract(target_map,
- map_offset,
+ map_start,
map_size,
©,
&cur_prot,
user_entry->backing.copy = copy;
user_entry->internal = FALSE;
user_entry->is_sub_map = FALSE;
- user_entry->is_pager = FALSE;
user_entry->is_copy = TRUE;
user_entry->offset = 0;
user_entry->protection = protections;
user_entry->size = map_size;
user_entry->data_offset = offset_in_page;
- *size = CAST_DOWN(vm_size_t, map_size);
+ *size = CAST_DOWN(vm_size_t, (user_entry->size -
+ user_entry->data_offset));
*object_handle = user_handle;
return KERN_SUCCESS;
}
if (parent_entry == NULL ||
(permission & MAP_MEM_NAMED_REUSE)) {
- if (use_data_addr) {
- map_size = vm_map_round_page(offset + *size, PAGE_MASK) - map_offset;
- offset_in_page = offset - map_offset;
+ map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+ map_size = map_end - map_start;
+ if (use_data_addr || use_4K_compat) {
+ offset_in_page = offset - map_start;
+ if (use_4K_compat)
+ offset_in_page &= ~((signed)(0xFFF));
} else {
- map_size = vm_map_round_page(*size, PAGE_MASK);
offset_in_page = 0;
}
/* note we check the permission of the range against */
/* that requested by the caller */
- kr = vm_map_lookup_locked(&target_map, map_offset,
+ kr = vm_map_lookup_locked(&target_map, map_start,
protections | mask_protections,
OBJECT_LOCK_EXCLUSIVE, &version,
&object, &obj_off, &prot, &wired,
*/
protections &= prot;
}
+#if CONFIG_EMBEDDED
+ /*
+ * Wiring would copy the pages to a shadow object.
+ * The shadow object would not be code-signed so
+ * attempting to execute code from these copied pages
+ * would trigger a code-signing violation.
+ */
+ if (prot & VM_PROT_EXECUTE) {
+ if (log_executable_mem_entry) {
+ void *bsd_info;
+ bsd_info = current_task()->bsd_info;
+ printf("pid %d[%s] making memory entry out of "
+ "executable range from 0x%llx to 0x%llx:"
+ "might cause code-signing issues "
+ "later\n",
+ proc_selfpid(),
+ (bsd_info != NULL
+ ? proc_name_address(bsd_info)
+ : "?"),
+ (uint64_t) map_start,
+ (uint64_t) map_end);
+ }
+ DTRACE_VM2(cs_executable_mem_entry,
+ uint64_t, (uint64_t)map_start,
+ uint64_t, (uint64_t)map_end);
+ cs_executable_mem_entry++;
+
+#if 11
+ /*
+ * We don't know how the memory entry will be used.
+ * It might never get wired and might not cause any
+ * trouble, so let's not reject this request...
+ */
+#else /* 11 */
+ kr = KERN_PROTECTION_FAILURE;
+ vm_object_unlock(object);
+ vm_map_unlock_read(target_map);
+ if(real_map != target_map)
+ vm_map_unlock_read(real_map);
+ goto make_mem_done;
+#endif /* 11 */
+
+ }
+#endif /* CONFIG_EMBEDDED */
+
if (((prot & protections) != protections)
- || (object == kernel_object)) {
+ || (object == kernel_object)) {
kr = KERN_INVALID_RIGHT;
vm_object_unlock(object);
vm_map_unlock_read(target_map);
vm_object_unlock(object);
local_map = original_map;
- local_offset = map_offset;
+ local_offset = map_start;
if(target_map != local_map) {
vm_map_unlock_read(target_map);
if(real_map != target_map)
object = VM_OBJECT_NULL;
goto make_mem_done;
}
+ iskernel = (local_map->pmap == kernel_pmap);
if(!(map_entry->is_sub_map)) {
- if(map_entry->object.vm_object != object) {
+ if (VME_OBJECT(map_entry) != object) {
kr = KERN_INVALID_ARGUMENT;
vm_map_unlock_read(target_map);
if(real_map != target_map)
} else {
vm_map_t tmap;
tmap = local_map;
- local_map = map_entry->object.sub_map;
+ local_map = VME_SUBMAP(map_entry);
vm_map_lock_read(local_map);
vm_map_unlock_read(tmap);
target_map = local_map;
real_map = local_map;
local_offset = local_offset - map_entry->vme_start;
- local_offset += map_entry->offset;
+ local_offset += VME_OFFSET(map_entry);
}
}
/* lets see if the next map entry is still */
/* pointing at this object and is contiguous */
while(map_size > mappable_size) {
- if((next_entry->object.vm_object == object) &&
- (next_entry->vme_start ==
- next_entry->vme_prev->vme_end) &&
- (next_entry->offset ==
- next_entry->vme_prev->offset +
- (next_entry->vme_prev->vme_end -
- next_entry->vme_prev->vme_start))) {
+ if ((VME_OBJECT(next_entry) == object) &&
+ (next_entry->vme_start ==
+ next_entry->vme_prev->vme_end) &&
+ (VME_OFFSET(next_entry) ==
+ (VME_OFFSET(next_entry->vme_prev) +
+ (next_entry->vme_prev->vme_end -
+ next_entry->vme_prev->vme_start)))) {
if (mask_protections) {
/*
* The caller asked us to use
}
}
- if (vm_map_entry_should_cow_for_true_share(map_entry) &&
+	/* vm_map_entry_should_cow_for_true_share() checks for malloc tags,
+	 * which are never set on kernel map entries */
+ if (!iskernel && vm_map_entry_should_cow_for_true_share(map_entry) &&
object->vo_size > map_size &&
map_size != 0) {
/*
vm_map_clip_start(target_map,
map_entry,
- vm_map_trunc_page(offset,
+ vm_map_trunc_page(map_start,
VM_MAP_PAGE_MASK(target_map)));
vm_map_clip_end(target_map,
map_entry,
- (vm_map_round_page(offset + map_size,
+ (vm_map_round_page(map_end,
VM_MAP_PAGE_MASK(target_map))));
force_shadow = TRUE;
if ((map_entry->vme_end - offset) < map_size) {
- map_size = map_entry->vme_end - offset;
+ map_size = map_entry->vme_end - map_start;
}
total_size = map_entry->vme_end - map_entry->vme_start;
((map_entry->needs_copy ||
object->shadowed ||
(object->vo_size > total_size &&
- (map_entry->offset != 0 ||
+ (VME_OFFSET(map_entry) != 0 ||
object->vo_size >
vm_map_round_page(total_size,
VM_MAP_PAGE_MASK(target_map)))))
*/
/* create a shadow object */
- vm_object_shadow(&map_entry->object.vm_object,
- &map_entry->offset, total_size);
- shadow_object = map_entry->object.vm_object;
+ VME_OBJECT_SHADOW(map_entry, total_size);
+ shadow_object = VME_OBJECT(map_entry);
#if 00
vm_object_unlock(object);
#endif
prot = map_entry->protection & ~VM_PROT_WRITE;
- if (override_nx(target_map, map_entry->alias) && prot)
+ if (override_nx(target_map,
+ VME_ALIAS(map_entry))
+ && prot)
prot |= VM_PROT_EXECUTE;
vm_object_pmap_protect(
- object, map_entry->offset,
+ object, VME_OFFSET(map_entry),
total_size,
((map_entry->is_shared
|| target_map->mapped_in_other_pmaps)
assert((next_entry->wired_count == 0) ||
(map_entry->wired_count));
- if(next_entry->object.vm_object == object) {
+ if (VME_OBJECT(next_entry) == object) {
vm_object_reference_locked(shadow_object);
- next_entry->object.vm_object
- = shadow_object;
+ VME_OBJECT_SET(next_entry,
+ shadow_object);
vm_object_deallocate(object);
- next_entry->offset
- = next_entry->vme_prev->offset +
- (next_entry->vme_prev->vme_end
- - next_entry->vme_prev->vme_start);
+ VME_OFFSET_SET(
+ next_entry,
+ (VME_OFFSET(next_entry->vme_prev) +
+ (next_entry->vme_prev->vme_end
+ - next_entry->vme_prev->vme_start)));
+ next_entry->use_pmap = TRUE;
next_entry->needs_copy = FALSE;
} else {
panic("mach_make_memory_entry_64:"
vm_object_deallocate(object); /* extra ref */
object = shadow_object;
- obj_off = (local_offset - map_entry->vme_start)
- + map_entry->offset;
+ obj_off = ((local_offset - map_entry->vme_start)
+ + VME_OFFSET(map_entry));
vm_map_lock_write_to_read(target_map);
}
/* against delayed copy, etc. is mostly defensive. */
wimg_mode = object->wimg_bits;
- if(!(object->nophyscache)) {
- if(access == MAP_MEM_IO) {
- wimg_mode = VM_WIMG_IO;
- } else if (access == MAP_MEM_COPYBACK) {
- wimg_mode = VM_WIMG_USE_DEFAULT;
- } else if (access == MAP_MEM_INNERWBACK) {
- wimg_mode = VM_WIMG_INNERWBACK;
- } else if (access == MAP_MEM_WTHRU) {
- wimg_mode = VM_WIMG_WTHRU;
- } else if (access == MAP_MEM_WCOMB) {
- wimg_mode = VM_WIMG_WCOMB;
- }
- }
+ if(!(object->nophyscache))
+ vm_prot_to_wimg(access, &wimg_mode);
#if VM_OBJECT_TRACKING_OP_TRUESHARE
if (!object->true_share &&
}
#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
+ vm_object_lock_assert_exclusive(object);
object->true_share = TRUE;
if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
parent_entry->backing.object == object &&
parent_entry->internal == object->internal &&
parent_entry->is_sub_map == FALSE &&
- parent_entry->is_pager == FALSE &&
parent_entry->offset == obj_off &&
parent_entry->protection == protections &&
parent_entry->size == map_size &&
- ((!use_data_addr && (parent_entry->data_offset == 0)) ||
- (use_data_addr && (parent_entry->data_offset == offset_in_page)))) {
+ ((!(use_data_addr || use_4K_compat) &&
+ (parent_entry->data_offset == 0)) ||
+ ((use_data_addr || use_4K_compat) &&
+ (parent_entry->data_offset == offset_in_page)))) {
/*
* We have a match: re-use "parent_entry".
*/
/* Get an extra send-right on handle */
ipc_port_copy_send(parent_handle);
- *size = CAST_DOWN(vm_size_t, map_size);
+ *size = CAST_DOWN(vm_size_t,
+ (parent_entry->size -
+ parent_entry->data_offset));
*object_handle = parent_handle;
return KERN_SUCCESS;
} else {
user_entry->backing.object = object;
user_entry->internal = object->internal;
user_entry->is_sub_map = FALSE;
- user_entry->is_pager = FALSE;
user_entry->offset = obj_off;
user_entry->data_offset = offset_in_page;
user_entry->protection = protections;
/* user_object pager and internal fields are not used */
/* when the object field is filled in. */
- *size = CAST_DOWN(vm_size_t, map_size);
+ *size = CAST_DOWN(vm_size_t, (user_entry->size -
+ user_entry->data_offset));
*object_handle = user_handle;
return KERN_SUCCESS;
goto make_mem_done;
}
- if (use_data_addr) {
+ if (use_data_addr || use_4K_compat) {
/*
* submaps and pagers should only be accessible from within
* the kernel, which shouldn't use the data address flag, so can fail here.
*/
- if (parent_entry->is_pager || parent_entry->is_sub_map) {
- panic("Shouldn't be using data address with a parent entry that is a submap or pager.");
+ if (parent_entry->is_sub_map) {
+ panic("Shouldn't be using data address with a parent entry that is a submap.");
}
/*
* Account for offset to data in parent entry and
goto make_mem_done;
}
- map_offset = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK);
- offset_in_page = (offset + parent_entry->data_offset) - map_offset;
- map_size = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK) - map_offset;
+ map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK);
+ offset_in_page = (offset + parent_entry->data_offset) - map_start;
+ if (use_4K_compat)
+ offset_in_page &= ~((signed)(0xFFF));
+ map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK);
+ map_size = map_end - map_start;
} else {
- map_size = vm_map_round_page(*size, PAGE_MASK);
+ map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+ map_size = map_end - map_start;
offset_in_page = 0;
if((offset + map_size) > parent_entry->size) {
}
user_entry->size = map_size;
- user_entry->offset = parent_entry->offset + map_offset;
+ user_entry->offset = parent_entry->offset + map_start;
user_entry->data_offset = offset_in_page;
user_entry->is_sub_map = parent_entry->is_sub_map;
- user_entry->is_pager = parent_entry->is_pager;
user_entry->is_copy = parent_entry->is_copy;
user_entry->internal = parent_entry->internal;
user_entry->protection = protections;
vm_map_lock(user_entry->backing.map);
user_entry->backing.map->ref_count++;
vm_map_unlock(user_entry->backing.map);
- }
- else if (parent_entry->is_pager) {
- user_entry->backing.pager = parent_entry->backing.pager;
- /* JMM - don't we need a reference here? */
} else {
object = parent_entry->backing.object;
assert(object != VM_OBJECT_NULL);
user_entry->backing.object = object;
/* we now point to this object, hold on */
- vm_object_reference(object);
vm_object_lock(object);
+ vm_object_reference_locked(object);
#if VM_OBJECT_TRACKING_OP_TRUESHARE
if (!object->true_share &&
vm_object_tracking_inited) {
object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
vm_object_unlock(object);
}
- *size = CAST_DOWN(vm_size_t, map_size);
+ *size = CAST_DOWN(vm_size_t, (user_entry->size -
+ user_entry->data_offset));
*object_handle = user_handle;
return KERN_SUCCESS;
}
return(KERN_SUCCESS);
}
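+
+/*
+ * vm_map_exec_lockdown:
+ *	Mark the map so that no new executable mappings may be established
+ *	in it; the mapping paths that honor map_disallow_new_exec enforce
+ *	this.
+ */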
+kern_return_t
+vm_map_exec_lockdown(
+ vm_map_t map)
+{
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ vm_map_lock(map);
+ map->map_disallow_new_exec = TRUE;
+ vm_map_unlock(map);
+
+ return(KERN_SUCCESS);
+}
+
__private_extern__ kern_return_t
mach_memory_entry_allocate(
vm_named_entry_t *user_entry_p,
ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
/* nsrequest unlocks user_handle */
- user_entry->backing.pager = NULL;
+ user_entry->backing.object = NULL;
user_entry->is_sub_map = FALSE;
- user_entry->is_pager = FALSE;
user_entry->is_copy = FALSE;
user_entry->internal = FALSE;
user_entry->size = 0;
*
* Create a named entry backed by the provided pager.
*
- * JMM - we need to hold a reference on the pager -
- * and release it when the named entry is destroyed.
*/
kern_return_t
mach_memory_object_memory_entry_64(
unsigned int access;
vm_named_entry_t user_entry;
ipc_port_t user_handle;
+ vm_object_t object;
if (host == HOST_NULL)
return(KERN_INVALID_HOST);
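+
+	/*
+	 * The named entry is backed by a VM object: either a fresh internal
+	 * object (NULL pager + internal), or the VM object resolved from the
+	 * supplied pager, with a reference taken on it.
+	 */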
+ if (pager == MEMORY_OBJECT_NULL && internal) {
+ object = vm_object_allocate(size);
+ if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
+ object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+ }
+ } else {
+ object = memory_object_to_vm_object(pager);
+ if (object != VM_OBJECT_NULL) {
+ vm_object_reference(object);
+ }
+ }
+ if (object == VM_OBJECT_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
if (mach_memory_entry_allocate(&user_entry, &user_handle)
!= KERN_SUCCESS) {
+ vm_object_deallocate(object);
return KERN_FAILURE;
}
- user_entry->backing.pager = pager;
user_entry->size = size;
user_entry->offset = 0;
user_entry->protection = permission & VM_PROT_ALL;
access = GET_MAP_MEM(permission);
SET_MAP_MEM(access, user_entry->protection);
- user_entry->internal = internal;
user_entry->is_sub_map = FALSE;
- user_entry->is_pager = TRUE;
assert(user_entry->ref_count == 1);
+ user_entry->backing.object = object;
+ user_entry->internal = object->internal;
+ assert(object->internal == internal);
+
*entry_handle = user_handle;
return KERN_SUCCESS;
-}
+}
kern_return_t
mach_memory_object_memory_entry(
ipc_port_t entry_port,
vm_purgable_t control,
int *state)
+{
+ if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
+ /* not allowed from user-space */
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return memory_entry_purgeable_control_internal(entry_port, control, state);
+}
+
+kern_return_t
+memory_entry_purgeable_control_internal(
+ ipc_port_t entry_port,
+ vm_purgable_t control,
+ int *state)
{
kern_return_t kr;
vm_named_entry_t mem_entry;
return KERN_INVALID_ARGUMENT;
}
if (control != VM_PURGABLE_SET_STATE &&
- control != VM_PURGABLE_GET_STATE)
+ control != VM_PURGABLE_GET_STATE &&
+ control != VM_PURGABLE_SET_STATE_FROM_KERNEL)
return(KERN_INVALID_ARGUMENT);
- if (control == VM_PURGABLE_SET_STATE &&
+ if ((control == VM_PURGABLE_SET_STATE ||
+ control == VM_PURGABLE_SET_STATE_FROM_KERNEL) &&
(((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK)))
return(KERN_INVALID_ARGUMENT);
named_entry_lock(mem_entry);
if (mem_entry->is_sub_map ||
- mem_entry->is_pager ||
mem_entry->is_copy) {
named_entry_unlock(mem_entry);
return KERN_INVALID_ARGUMENT;
named_entry_lock(mem_entry);
if (mem_entry->is_sub_map ||
- mem_entry->is_pager ||
mem_entry->is_copy) {
named_entry_unlock(mem_entry);
return KERN_INVALID_ARGUMENT;
if(named_entry->ref_count == 0) {
if (named_entry->is_sub_map) {
vm_map_deallocate(named_entry->backing.map);
- } else if (named_entry->is_pager) {
- /* JMM - need to drop reference on pager in that case */
} else if (named_entry->is_copy) {
vm_map_copy_discard(named_entry->backing.copy);
} else {
named_entry_lock(mem_entry);
if (mem_entry->is_sub_map ||
- mem_entry->is_pager ||
mem_entry->is_copy) {
named_entry_unlock(mem_entry);
return KERN_INVALID_ARGUMENT;
named_entry_lock(mem_entry);
if (mem_entry->is_sub_map ||
- mem_entry->is_pager ||
mem_entry->is_copy) {
named_entry_unlock(mem_entry);
return KERN_INVALID_ARGUMENT;
return kr;
}
-
-kern_return_t
-set_dp_control_port(
- host_priv_t host_priv,
- ipc_port_t control_port)
-{
- if (host_priv == HOST_PRIV_NULL)
- return (KERN_INVALID_HOST);
-
- if (IP_VALID(dynamic_pager_control_port))
- ipc_port_release_send(dynamic_pager_control_port);
-
- dynamic_pager_control_port = control_port;
- return KERN_SUCCESS;
-}
-
-kern_return_t
-get_dp_control_port(
- host_priv_t host_priv,
- ipc_port_t *control_port)
-{
- if (host_priv == HOST_PRIV_NULL)
- return (KERN_INVALID_HOST);
-
- *control_port = ipc_port_copy_send(dynamic_pager_control_port);
- return KERN_SUCCESS;
-
-}
-
/* ******* Temporary Internal calls to UPL for BSD ***** */
extern int kernel_upl_map(
vm_map_lock(map);
while (vm_map_lookup_entry(map, map_offset, &entry)) {
- if (entry->object.vm_object == VM_OBJECT_NULL) {
+ if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
vm_map_unlock(map);
return (ppnum_t) 0;
}
if (entry->is_sub_map) {
vm_map_t old_map;
- vm_map_lock(entry->object.sub_map);
+ vm_map_lock(VME_SUBMAP(entry));
old_map = map;
- map = entry->object.sub_map;
- map_offset = entry->offset + (map_offset - entry->vme_start);
+ map = VME_SUBMAP(entry);
+ map_offset = (VME_OFFSET(entry) +
+ (map_offset - entry->vme_start));
vm_map_unlock(old_map);
continue;
}
- if (entry->object.vm_object->phys_contiguous) {
+ if (VME_OBJECT(entry)->phys_contiguous) {
/* These are not standard pageable memory mappings */
/* If they are not present in the object they will */
/* have to be picked up from the pager through the */
/* fault mechanism. */
- if(entry->object.vm_object->vo_shadow_offset == 0) {
+ if (VME_OBJECT(entry)->vo_shadow_offset == 0) {
/* need to call vm_fault */
vm_map_unlock(map);
vm_fault(map, map_offset, VM_PROT_NONE,
- FALSE, THREAD_UNINT, NULL, 0);
+ FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
+ THREAD_UNINT, NULL, 0);
vm_map_lock(map);
continue;
}
- offset = entry->offset + (map_offset - entry->vme_start);
+ offset = (VME_OFFSET(entry) +
+ (map_offset - entry->vme_start));
phys_page = (ppnum_t)
- ((entry->object.vm_object->vo_shadow_offset
- + offset) >> PAGE_SHIFT);
+ ((VME_OBJECT(entry)->vo_shadow_offset
+ + offset) >> PAGE_SHIFT);
break;
}
- offset = entry->offset + (map_offset - entry->vme_start);
- object = entry->object.vm_object;
+ offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));
+ object = VME_OBJECT(entry);
vm_object_lock(object);
while (TRUE) {
vm_page_t dst_page = vm_page_lookup(object,offset);
break;
}
} else {
- phys_page = (ppnum_t)(dst_page->phys_page);
+ phys_page = (ppnum_t)(VM_PAGE_GET_PHYS_PAGE(dst_page));
vm_object_unlock(object);
break;
}
return phys_page;
}
-
-
+#if 0
kern_return_t kernel_object_iopl_request( /* forward */
vm_named_entry_t named_entry,
memory_object_offset_t offset,
named_entry_lock(named_entry);
- if (named_entry->is_pager) {
- object = vm_object_enter(named_entry->backing.pager,
- named_entry->offset + named_entry->size,
- named_entry->internal,
- FALSE,
- FALSE);
- if (object == VM_OBJECT_NULL) {
- named_entry_unlock(named_entry);
- return(KERN_INVALID_OBJECT);
- }
-
- /* JMM - drop reference on the pager here? */
-
- /* create an extra reference for the object */
- vm_object_lock(object);
- vm_object_reference_locked(object);
- named_entry->backing.object = object;
- named_entry->is_pager = FALSE;
- named_entry_unlock(named_entry);
-
- /* wait for object (if any) to be ready */
- if (!named_entry->internal) {
- while (!object->pager_ready) {
- vm_object_wait(object,
- VM_OBJECT_EVENT_PAGER_READY,
- THREAD_UNINT);
- vm_object_lock(object);
- }
- }
- vm_object_unlock(object);
-
- } else {
- /* This is the case where we are going to operate */
- /* an an already known object. If the object is */
- /* not ready it is internal. An external */
- /* object cannot be mapped until it is ready */
- /* we can therefore avoid the ready check */
- /* in this case. */
- object = named_entry->backing.object;
- vm_object_reference(object);
- named_entry_unlock(named_entry);
- }
+ /* This is the case where we are going to operate */
+ /* on an already known object. If the object is */
+ /* not ready it is internal. An external */
+ /* object cannot be mapped until it is ready */
+ /* we can therefore avoid the ready check */
+ /* in this case. */
+ object = named_entry->backing.object;
+ vm_object_reference(object);
+ named_entry_unlock(named_entry);
if (!object->private) {
if (*upl_size > MAX_UPL_TRANSFER_BYTES)
upl_ptr,
user_page_list,
page_list_count,
- caller_flags);
+ (upl_control_flags_t)(unsigned int)caller_flags);
vm_object_deallocate(object);
return ret;
}
+#endif
+
+/*
+ * These symbols are looked up at runtime by VMware and VirtualBox,
+ * despite not being exported in the symbol sets.
+ */
+
+#if defined(__x86_64__)
+
+kern_return_t
+mach_vm_map(
+ vm_map_t target_map,
+ mach_vm_offset_t *address,
+ mach_vm_size_t initial_size,
+ mach_vm_offset_t mask,
+ int flags,
+ ipc_port_t port,
+ vm_object_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance);
+
+kern_return_t
+mach_vm_remap(
+ vm_map_t target_map,
+ mach_vm_offset_t *address,
+ mach_vm_size_t size,
+ mach_vm_offset_t mask,
+ int flags,
+ vm_map_t src_map,
+ mach_vm_offset_t memory_address,
+ boolean_t copy,
+ vm_prot_t *cur_protection,
+ vm_prot_t *max_protection,
+ vm_inherit_t inheritance);
+
+kern_return_t
+mach_vm_map(
+ vm_map_t target_map,
+ mach_vm_offset_t *address,
+ mach_vm_size_t initial_size,
+ mach_vm_offset_t mask,
+ int flags,
+ ipc_port_t port,
+ vm_object_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
+{
+ return (mach_vm_map_external(target_map, address, initial_size, mask, flags, port,
+ offset, copy, cur_protection, max_protection, inheritance));
+}
+
+kern_return_t
+mach_vm_remap(
+ vm_map_t target_map,
+ mach_vm_offset_t *address,
+ mach_vm_size_t size,
+ mach_vm_offset_t mask,
+ int flags,
+ vm_map_t src_map,
+ mach_vm_offset_t memory_address,
+ boolean_t copy,
+ vm_prot_t *cur_protection,
+ vm_prot_t *max_protection,
+ vm_inherit_t inheritance)
+{
+ return (mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address,
+ copy, cur_protection, max_protection, inheritance));
+}
+
+kern_return_t
+vm_map(
+ vm_map_t target_map,
+ vm_offset_t *address,
+ vm_size_t size,
+ vm_offset_t mask,
+ int flags,
+ ipc_port_t port,
+ vm_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance);
+
+kern_return_t
+vm_map(
+ vm_map_t target_map,
+ vm_offset_t *address,
+ vm_size_t size,
+ vm_offset_t mask,
+ int flags,
+ ipc_port_t port,
+ vm_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
+{
+ vm_tag_t tag;
+
+ VM_GET_FLAGS_ALIAS(flags, tag);
+ return (vm_map_kernel(target_map, address, size, mask, flags, tag, port, offset, copy, cur_protection, max_protection, inheritance));
+}
+
+#endif /* __x86_64__ */