+ * JMM - If the result of the copyout is an address range
+ * that cannot be described with a vm_address_t (i.e. the
+ * caller had a larger address space but used this call
+ * anyway), it will result in a truncated address being
+ * returned (and a likely confused caller).
+ */
+
+/*
+ * vm_read_list -
+ * Copy each requested address range out of "map" and into the
+ * current task's map.  On success the (possibly truncated)
+ * destination address is written back into the entry; entries
+ * whose copy fails are zeroed.  Returns the last error seen
+ * (KERN_SUCCESS if every entry succeeded).
+ */
+kern_return_t
+vm_read_list(
+	vm_map_t		map,
+	vm_read_entry_t		data_list,
+	natural_t		count)
+{
+	mach_msg_type_number_t	i;
+	kern_return_t	error;
+	vm_map_copy_t	copy;
+
+	if (map == VM_MAP_NULL ||
+	    count > VM_MAP_ENTRY_MAX) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	error = KERN_SUCCESS;
+	for (i = 0; i < count; i++) {
+		vm_map_address_t map_addr;
+		vm_map_size_t map_size;
+
+		map_addr = (vm_map_address_t)(data_list[i].address);
+		map_size = (vm_map_size_t)(data_list[i].size);
+
+		if (map_size != 0) {
+			error = vm_map_copyin(map,
+			    map_addr,
+			    map_size,
+			    FALSE,	/* src_destroy */
+			    &copy);	/* fixed: "&copy" had been mangled into "©" */
+			if (KERN_SUCCESS == error) {
+				error = vm_map_copyout(current_task()->map,
+				    &map_addr,
+				    copy);
+				if (KERN_SUCCESS == error) {
+					data_list[i].address =
+					    CAST_DOWN(vm_offset_t, map_addr);
+					continue;
+				}
+				/* copyout failed: release the copied data */
+				vm_map_copy_discard(copy);
+			}
+		}
+		/* failure (or zero size): report an empty entry */
+		data_list[i].address = (mach_vm_address_t)0;
+		data_list[i].size = (mach_vm_size_t)0;
+	}
+	return error;
+}
+
+/*
+ * mach_vm_read_overwrite -
+ * Overwrite a range of the current map with data from the specified
+ * map/address range.
+ *
+ * In making an assumption that the current thread is local, it is
+ * no longer cluster-safe without a fully supportive local proxy
+ * thread/task (but we don't support clusters anymore so this is moot).
+ */
+
+/*
+ * Copies "size" bytes from (map, address) over the caller's buffer
+ * at "data" in the current thread's map; writes the copied size to
+ * *data_size on success.
+ */
+kern_return_t
+mach_vm_read_overwrite(
+	vm_map_t		map,
+	mach_vm_address_t	address,
+	mach_vm_size_t	size,
+	mach_vm_address_t	data,
+	mach_vm_size_t	*data_size)
+{
+	kern_return_t	error;
+	vm_map_copy_t	copy;
+
+	if (map == VM_MAP_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/* fixed: "&copy" had been mangled into "©" by entity decoding */
+	error = vm_map_copyin(map, (vm_map_address_t)address,
+	    (vm_map_size_t)size, FALSE, &copy);
+
+	if (KERN_SUCCESS == error) {
+		if (copy) {
+			assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
+		}
+
+		error = vm_map_copy_overwrite(current_thread()->map,
+		    (vm_map_address_t)data,
+		    copy, (vm_map_size_t) size, FALSE);
+		if (KERN_SUCCESS == error) {
+			*data_size = size;
+			return error;
+		}
+		/* overwrite failed: the copy was not consumed, discard it */
+		vm_map_copy_discard(copy);
+	}
+	return error;
+}
+
+/*
+ * vm_read_overwrite -
+ * Overwrite a range of the current map with data from the specified
+ * map/address range.
+ *
+ * This routine adds the additional limitation that the source and
+ * destination ranges must be describable with vm_address_t values
+ * (i.e. the same size address spaces as the kernel, or at least
+ * the ranges are in that first portion of the respective address
+ * spaces).
+ */
+
+/*
+ * Same as mach_vm_read_overwrite, restricted to ranges describable
+ * with vm_address_t / vm_size_t values.
+ */
+kern_return_t
+vm_read_overwrite(
+	vm_map_t	map,
+	vm_address_t	address,
+	vm_size_t	size,
+	vm_address_t	data,
+	vm_size_t	*data_size)
+{
+	kern_return_t	error;
+	vm_map_copy_t	copy;
+
+	if (map == VM_MAP_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/* fixed: "&copy" had been mangled into "©" by entity decoding */
+	error = vm_map_copyin(map, (vm_map_address_t)address,
+	    (vm_map_size_t)size, FALSE, &copy);
+
+	if (KERN_SUCCESS == error) {
+		if (copy) {
+			assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
+		}
+
+		error = vm_map_copy_overwrite(current_thread()->map,
+		    (vm_map_address_t)data,
+		    copy, (vm_map_size_t) size, FALSE);
+		if (KERN_SUCCESS == error) {
+			*data_size = size;
+			return error;
+		}
+		/* overwrite failed: the copy was not consumed, discard it */
+		vm_map_copy_discard(copy);
+	}
+	return error;
+}
+
+
+/*
+ * mach_vm_write -
+ * Overwrite the specified address range with the data provided
+ * (from the current map).
+ */
+kern_return_t
+mach_vm_write(
+	vm_map_t			map,
+	mach_vm_address_t		address,
+	pointer_t			data,
+	mach_msg_type_number_t	size)
+{
+	if (VM_MAP_NULL == map) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/* NOTE: "data" is handed to us as a vm_map_copy_t in disguise */
+	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
+	           (vm_map_copy_t)data, size, FALSE /* interruptible XXX */);
+}
+
+/*
+ * vm_write -
+ * Overwrite the specified address range with the data provided
+ * (from the current map).
+ *
+ * The addressability of the range of addresses to overwrite is
+ * limited by the use of a vm_address_t (same size as kernel map).
+ * Either the target map is also small, or the range is in the
+ * low addresses within it.
+ */
+kern_return_t
+vm_write(
+	vm_map_t			map,
+	vm_address_t			address,
+	pointer_t			data,
+	mach_msg_type_number_t	size)
+{
+	if (VM_MAP_NULL == map) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/* NOTE: "data" is handed to us as a vm_map_copy_t in disguise */
+	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
+	           (vm_map_copy_t)data, size, FALSE /* interruptible XXX */);
+}
+
+/*
+ * mach_vm_copy -
+ * Overwrite one range of the specified map with the contents of
+ * another range within that same map (i.e. both address ranges
+ * are "over there").
+ */
+kern_return_t
+mach_vm_copy(
+	vm_map_t	map,
+	mach_vm_address_t	source_address,
+	mach_vm_size_t	size,
+	mach_vm_address_t	dest_address)
+{
+	vm_map_copy_t copy;
+	kern_return_t kr;
+
+	if (map == VM_MAP_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/* fixed: "&copy" had been mangled into "©" by entity decoding */
+	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
+	    (vm_map_size_t)size, FALSE, &copy);
+
+	if (KERN_SUCCESS == kr) {
+		if (copy) {
+			assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
+		}
+
+		kr = vm_map_copy_overwrite(map,
+		    (vm_map_address_t)dest_address,
+		    copy, (vm_map_size_t) size, FALSE /* interruptible XXX */);
+
+		if (KERN_SUCCESS != kr) {
+			/* overwrite failed: the copy was not consumed */
+			vm_map_copy_discard(copy);
+		}
+	}
+	return kr;
+}
+
+/*
+ * vm_copy -
+ * Same as mach_vm_copy, restricted to address ranges describable
+ * with vm_address_t / vm_size_t values.
+ */
+kern_return_t
+vm_copy(
+	vm_map_t	map,
+	vm_address_t	source_address,
+	vm_size_t	size,
+	vm_address_t	dest_address)
+{
+	vm_map_copy_t copy;
+	kern_return_t kr;
+
+	if (map == VM_MAP_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/* fixed: "&copy" had been mangled into "©" by entity decoding */
+	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
+	    (vm_map_size_t)size, FALSE, &copy);
+
+	if (KERN_SUCCESS == kr) {
+		if (copy) {
+			assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
+		}
+
+		kr = vm_map_copy_overwrite(map,
+		    (vm_map_address_t)dest_address,
+		    copy, (vm_map_size_t) size, FALSE /* interruptible XXX */);
+
+		if (KERN_SUCCESS != kr) {
+			/* overwrite failed: the copy was not consumed */
+			vm_map_copy_discard(copy);
+		}
+	}
+	return kr;
+}
+
+/*
+ * mach_vm_map -
+ * Map some range of an object into an address space.
+ *
+ * The object can be one of several types of objects:
+ * NULL - anonymous memory
+ * a named entry - a range within another address space
+ * or a range within a memory object
+ * a whole memory object
+ *
+ */
+kern_return_t
+mach_vm_map_external(
+	vm_map_t		target_map,
+	mach_vm_offset_t	*address,
+	mach_vm_size_t	initial_size,
+	mach_vm_offset_t	mask,
+	int			flags,
+	ipc_port_t		port,
+	vm_object_offset_t	offset,
+	boolean_t		copy,
+	vm_prot_t		cur_protection,
+	vm_prot_t		max_protection,
+	vm_inherit_t		inheritance)
+{
+	vm_tag_t tag;
+
+	/* split the caller-supplied flags into real flags and an alias tag */
+	VM_GET_FLAGS_ALIAS(flags, tag);
+
+	return mach_vm_map_kernel(target_map, address, initial_size, mask,
+	           flags, VM_MAP_KERNEL_FLAGS_NONE, tag, port, offset, copy,
+	           cur_protection, max_protection, inheritance);
+}
+
+kern_return_t
+mach_vm_map_kernel(
+	vm_map_t		target_map,
+	mach_vm_offset_t	*address,
+	mach_vm_size_t	initial_size,
+	mach_vm_offset_t	mask,
+	int			flags,
+	vm_map_kernel_flags_t	vmk_flags,
+	vm_tag_t		tag,
+	ipc_port_t		port,
+	vm_object_offset_t	offset,
+	boolean_t		copy,
+	vm_prot_t		cur_protection,
+	vm_prot_t		max_protection,
+	vm_inherit_t		inheritance)
+{
+	vm_map_offset_t		map_addr = (vm_map_offset_t)*address;
+	kern_return_t		kr;
+
+	/* reject any kernel-only flags coming from callers */
+	if (flags & ~VM_FLAGS_USER_MAP) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	kr = vm_map_enter_mem_object(target_map,
+	    &map_addr,
+	    initial_size,
+	    mask,
+	    flags,
+	    vmk_flags,
+	    tag,
+	    port,
+	    offset,
+	    copy,
+	    cur_protection,
+	    max_protection,
+	    inheritance);
+
+#if KASAN
+	if (kr == KERN_SUCCESS && target_map->pmap == kernel_pmap) {
+		kasan_notify_address(map_addr, initial_size);
+	}
+#endif
+
+	*address = map_addr;
+	return kr;
+}
+
+
+/* legacy interface */
+kern_return_t
+vm_map_64_external(
+	vm_map_t		target_map,
+	vm_offset_t		*address,
+	vm_size_t		size,
+	vm_offset_t		mask,
+	int			flags,
+	ipc_port_t		port,
+	vm_object_offset_t	offset,
+	boolean_t		copy,
+	vm_prot_t		cur_protection,
+	vm_prot_t		max_protection,
+	vm_inherit_t		inheritance)
+{
+	vm_tag_t tag;
+
+	/* split the caller-supplied flags into real flags and an alias tag */
+	VM_GET_FLAGS_ALIAS(flags, tag);
+
+	return vm_map_64_kernel(target_map, address, size, mask,
+	           flags, VM_MAP_KERNEL_FLAGS_NONE, tag, port, offset, copy,
+	           cur_protection, max_protection, inheritance);
+}
+
+kern_return_t
+vm_map_64_kernel(
+	vm_map_t		target_map,
+	vm_offset_t		*address,
+	vm_size_t		size,
+	vm_offset_t		mask,
+	int			flags,
+	vm_map_kernel_flags_t	vmk_flags,
+	vm_tag_t		tag,
+	ipc_port_t		port,
+	vm_object_offset_t	offset,
+	boolean_t		copy,
+	vm_prot_t		cur_protection,
+	vm_prot_t		max_protection,
+	vm_inherit_t		inheritance)
+{
+	mach_vm_address_t	addr64 = (mach_vm_address_t)*address;
+	kern_return_t		kr;
+
+	/* widen the legacy arguments and defer to the 64-bit entry point */
+	kr = mach_vm_map_kernel(target_map, &addr64,
+	    (mach_vm_size_t)size, (mach_vm_offset_t)mask,
+	    flags, vmk_flags, tag,
+	    port, offset, copy,
+	    cur_protection, max_protection, inheritance);
+
+	/* narrow the result back to the legacy vm_offset_t width */
+	*address = CAST_DOWN(vm_offset_t, addr64);
+	return kr;
+}
+
+/* temporary, until world build */
+kern_return_t
+vm_map_external(
+	vm_map_t	target_map,
+	vm_offset_t	*address,
+	vm_size_t	size,
+	vm_offset_t	mask,
+	int		flags,
+	ipc_port_t	port,
+	vm_offset_t	offset,
+	boolean_t	copy,
+	vm_prot_t	cur_protection,
+	vm_prot_t	max_protection,
+	vm_inherit_t	inheritance)
+{
+	vm_tag_t tag;
+
+	/* split the caller-supplied flags into real flags and an alias tag */
+	VM_GET_FLAGS_ALIAS(flags, tag);
+
+	return vm_map_kernel(target_map, address, size, mask,
+	           flags, VM_MAP_KERNEL_FLAGS_NONE, tag, port, offset, copy,
+	           cur_protection, max_protection, inheritance);
+}
+
+kern_return_t
+vm_map_kernel(
+	vm_map_t		target_map,
+	vm_offset_t		*address,
+	vm_size_t		size,
+	vm_offset_t		mask,
+	int			flags,
+	vm_map_kernel_flags_t	vmk_flags,
+	vm_tag_t		tag,
+	ipc_port_t		port,
+	vm_offset_t		offset,
+	boolean_t		copy,
+	vm_prot_t		cur_protection,
+	vm_prot_t		max_protection,
+	vm_inherit_t		inheritance)
+{
+	mach_vm_address_t	addr64 = (mach_vm_address_t)*address;
+	kern_return_t		kr;
+
+	/* widen all legacy arguments and defer to the 64-bit entry point */
+	kr = mach_vm_map_kernel(target_map, &addr64,
+	    (mach_vm_size_t)size, (mach_vm_offset_t)mask,
+	    flags, vmk_flags, tag,
+	    port, (vm_object_offset_t)offset, copy,
+	    cur_protection, max_protection, inheritance);
+
+	/* narrow the result back to the legacy vm_offset_t width */
+	*address = CAST_DOWN(vm_offset_t, addr64);
+	return kr;
+}
+
+/*
+ * mach_vm_remap_new -
+ * Behaves like mach_vm_remap, except that VM_FLAGS_RETURN_DATA_ADDR is always set
+ * and {cur,max}_protection are in/out.
+ */
+kern_return_t
+mach_vm_remap_new_external(
+	vm_map_t		target_map,
+	mach_vm_offset_t	*address,
+	mach_vm_size_t	size,
+	mach_vm_offset_t	mask,
+	int			flags,
+	mach_port_t		src_tport,
+	mach_vm_offset_t	memory_address,
+	boolean_t		copy,
+	vm_prot_t		*cur_protection,	/* IN/OUT */
+	vm_prot_t		*max_protection,	/* IN/OUT */
+	vm_inherit_t		inheritance)
+{
+	vm_tag_t		tag;
+	vm_map_offset_t		remap_addr;
+	kern_return_t		kr;
+	vm_map_t		src_map;
+
+	/* this "new" variant always returns the data address */
+	flags |= VM_FLAGS_RETURN_DATA_ADDR;
+	VM_GET_FLAGS_ALIAS(flags, tag);
+
+	/* refuse kernel-only flags from callers */
+	if (flags & ~VM_FLAGS_USER_REMAP) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	if (VM_MAP_NULL == target_map) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/* requested protections must be valid, with cur a subset of max */
+	if ((*cur_protection & ~VM_PROT_ALL) ||
+	    (*max_protection & ~VM_PROT_ALL) ||
+	    (*cur_protection & *max_protection) != *cur_protection) {
+		return KERN_INVALID_ARGUMENT;
+	}
+	if ((*max_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
+	    (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
+		/*
+		 * XXX FBDP TODO
+		 * enforce target's "wx" policies
+		 */
+		return KERN_PROTECTION_FAILURE;
+	}
+
+	/* a copy or read-only remap only needs a read-capable map port */
+	if (copy || *max_protection == VM_PROT_READ || *max_protection == VM_PROT_NONE) {
+		src_map = convert_port_to_map_read(src_tport);
+	} else {
+		src_map = convert_port_to_map(src_tport);
+	}
+
+	if (VM_MAP_NULL == src_map) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	remap_addr = (vm_map_offset_t)*address;
+
+	kr = vm_map_remap(target_map,
+	    &remap_addr,
+	    size,
+	    mask,
+	    flags,
+	    VM_MAP_KERNEL_FLAGS_NONE,
+	    tag,
+	    src_map,
+	    memory_address,
+	    copy,
+	    cur_protection,	/* IN/OUT */
+	    max_protection,	/* IN/OUT */
+	    inheritance);
+
+	*address = remap_addr;
+	vm_map_deallocate(src_map);
+
+	if (KERN_SUCCESS == kr) {
+		ipc_port_release_send(src_tport);	/* consume on success */
+	}
+	return kr;
+}
+
+/*
+ * mach_vm_remap -
+ * Remap a range of memory from one task into another,
+ * to another address range within the same task, or
+ * over top of itself (with altered permissions and/or
+ * as an in-place copy of itself).
+ */
+kern_return_t
+mach_vm_remap_external(
+	vm_map_t		target_map,
+	mach_vm_offset_t	*address,
+	mach_vm_size_t	size,
+	mach_vm_offset_t	mask,
+	int			flags,
+	vm_map_t		src_map,
+	mach_vm_offset_t	memory_address,
+	boolean_t		copy,
+	vm_prot_t		*cur_protection,	/* OUT */
+	vm_prot_t		*max_protection,	/* OUT */
+	vm_inherit_t		inheritance)
+{
+	vm_tag_t tag;
+
+	/* split the caller-supplied flags into real flags and an alias tag */
+	VM_GET_FLAGS_ALIAS(flags, tag);
+
+	return mach_vm_remap_kernel(target_map, address, size, mask, flags,
+	           tag, src_map, memory_address, copy,
+	           cur_protection, max_protection, inheritance);
+}
+
+kern_return_t
+mach_vm_remap_kernel(
+	vm_map_t		target_map,
+	mach_vm_offset_t	*address,
+	mach_vm_size_t	size,
+	mach_vm_offset_t	mask,
+	int			flags,
+	vm_tag_t		tag,
+	vm_map_t		src_map,
+	mach_vm_offset_t	memory_address,
+	boolean_t		copy,
+	vm_prot_t		*cur_protection,	/* OUT */
+	vm_prot_t		*max_protection,	/* OUT */
+	vm_inherit_t		inheritance)
+{
+	vm_map_offset_t		remap_addr;
+	kern_return_t		kr;
+
+	if (target_map == VM_MAP_NULL || src_map == VM_MAP_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/* refuse kernel-only flags from callers */
+	if (flags & ~VM_FLAGS_USER_REMAP) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	remap_addr = (vm_map_offset_t)*address;
+
+	/* these are OUT-only here: start from no protection */
+	*cur_protection = VM_PROT_NONE;
+	*max_protection = VM_PROT_NONE;
+
+	kr = vm_map_remap(target_map,
+	    &remap_addr,
+	    size,
+	    mask,
+	    flags,
+	    VM_MAP_KERNEL_FLAGS_NONE,
+	    tag,
+	    src_map,
+	    memory_address,
+	    copy,
+	    cur_protection,	/* IN/OUT */
+	    max_protection,	/* IN/OUT */
+	    inheritance);
+
+	*address = remap_addr;
+	return kr;
+}
+
+/*
+ * vm_remap_new -
+ * Behaves like vm_remap, except that VM_FLAGS_RETURN_DATA_ADDR is always set
+ * and {cur,max}_protection are in/out.
+ */
+kern_return_t
+vm_remap_new_external(
+	vm_map_t		target_map,
+	vm_offset_t		*address,
+	vm_size_t		size,
+	vm_offset_t		mask,
+	int			flags,
+	mach_port_t		src_tport,
+	vm_offset_t		memory_address,
+	boolean_t		copy,
+	vm_prot_t		*cur_protection,	/* IN/OUT */
+	vm_prot_t		*max_protection,	/* IN/OUT */
+	vm_inherit_t		inheritance)
+{
+	vm_tag_t		tag;
+	vm_map_offset_t		remap_addr;
+	kern_return_t		kr;
+	vm_map_t		src_map;
+
+	/* this "new" variant always returns the data address */
+	flags |= VM_FLAGS_RETURN_DATA_ADDR;
+	VM_GET_FLAGS_ALIAS(flags, tag);
+
+	/* refuse kernel-only flags from callers */
+	if (flags & ~VM_FLAGS_USER_REMAP) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	if (VM_MAP_NULL == target_map) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/* requested protections must be valid, with cur a subset of max */
+	if ((*cur_protection & ~VM_PROT_ALL) ||
+	    (*max_protection & ~VM_PROT_ALL) ||
+	    (*cur_protection & *max_protection) != *cur_protection) {
+		return KERN_INVALID_ARGUMENT;
+	}
+	if ((*max_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
+	    (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
+		/*
+		 * XXX FBDP TODO
+		 * enforce target's "wx" policies
+		 */
+		return KERN_PROTECTION_FAILURE;
+	}
+
+	/* a copy or read-only remap only needs a read-capable map port */
+	if (copy || *max_protection == VM_PROT_READ || *max_protection == VM_PROT_NONE) {
+		src_map = convert_port_to_map_read(src_tport);
+	} else {
+		src_map = convert_port_to_map(src_tport);
+	}
+
+	if (VM_MAP_NULL == src_map) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	remap_addr = (vm_map_offset_t)*address;
+
+	kr = vm_map_remap(target_map,
+	    &remap_addr,
+	    size,
+	    mask,
+	    flags,
+	    VM_MAP_KERNEL_FLAGS_NONE,
+	    tag,
+	    src_map,
+	    memory_address,
+	    copy,
+	    cur_protection,	/* IN/OUT */
+	    max_protection,	/* IN/OUT */
+	    inheritance);
+
+	/* narrow the result back to the legacy vm_offset_t width */
+	*address = CAST_DOWN(vm_offset_t, remap_addr);
+	vm_map_deallocate(src_map);
+
+	if (KERN_SUCCESS == kr) {
+		ipc_port_release_send(src_tport);	/* consume on success */
+	}
+	return kr;
+}
+
+/*
+ * vm_remap -
+ * Remap a range of memory from one task into another,
+ * to another address range within the same task, or
+ * over top of itself (with altered permissions and/or
+ * as an in-place copy of itself).
+ *
+ * The addressability of the source and target address
+ * range is limited by the size of vm_address_t (in the
+ * kernel context).
+ */
+kern_return_t
+vm_remap_external(
+	vm_map_t		target_map,
+	vm_offset_t		*address,
+	vm_size_t		size,
+	vm_offset_t		mask,
+	int			flags,
+	vm_map_t		src_map,
+	vm_offset_t		memory_address,
+	boolean_t		copy,
+	vm_prot_t		*cur_protection,	/* OUT */
+	vm_prot_t		*max_protection,	/* OUT */
+	vm_inherit_t		inheritance)
+{
+	vm_tag_t tag;
+
+	/* split the caller-supplied flags into real flags and an alias tag */
+	VM_GET_FLAGS_ALIAS(flags, tag);
+
+	return vm_remap_kernel(target_map, address, size, mask, flags,
+	           tag, src_map, memory_address, copy,
+	           cur_protection, max_protection, inheritance);
+}
+
+kern_return_t
+vm_remap_kernel(
+	vm_map_t		target_map,
+	vm_offset_t		*address,
+	vm_size_t		size,
+	vm_offset_t		mask,
+	int			flags,
+	vm_tag_t		tag,
+	vm_map_t		src_map,
+	vm_offset_t		memory_address,
+	boolean_t		copy,
+	vm_prot_t		*cur_protection,	/* OUT */
+	vm_prot_t		*max_protection,	/* OUT */
+	vm_inherit_t		inheritance)
+{
+	vm_map_offset_t		remap_addr;
+	kern_return_t		kr;
+
+	if (target_map == VM_MAP_NULL || src_map == VM_MAP_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/* refuse kernel-only flags from callers */
+	if (flags & ~VM_FLAGS_USER_REMAP) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	remap_addr = (vm_map_offset_t)*address;
+
+	/* these are OUT-only here: start from no protection */
+	*cur_protection = VM_PROT_NONE;
+	*max_protection = VM_PROT_NONE;
+
+	kr = vm_map_remap(target_map,
+	    &remap_addr,
+	    size,
+	    mask,
+	    flags,
+	    VM_MAP_KERNEL_FLAGS_NONE,
+	    tag,
+	    src_map,
+	    memory_address,
+	    copy,
+	    cur_protection,	/* IN/OUT */
+	    max_protection,	/* IN/OUT */
+	    inheritance);
+
+	/* narrow the result back to the legacy vm_offset_t width */
+	*address = CAST_DOWN(vm_offset_t, remap_addr);
+	return kr;
+}
+
+/*
+ * NOTE: these routine (and this file) will no longer require mach_host_server.h
+ * when mach_vm_wire and vm_wire are changed to use ledgers.
+ */
+#include <mach/mach_host_server.h>
+/*
+ * mach_vm_wire
+ * Specify that the range of the virtual address space
+ * of the target task must not cause page faults for
+ * the indicated accesses.
+ *
+ * [ To unwire the pages, specify VM_PROT_NONE. ]
+ */
+kern_return_t
+mach_vm_wire_external(
+	host_priv_t		host_priv,
+	vm_map_t		map,
+	mach_vm_offset_t	start,
+	mach_vm_size_t	size,
+	vm_prot_t		access)
+{
+	/* externally-requested wirings are accounted under the mlock tag */
+	return mach_vm_wire_kernel(host_priv, map, start, size, access,
+	           VM_KERN_MEMORY_MLOCK);
+}
+
+kern_return_t
+mach_vm_wire_kernel(
+	host_priv_t		host_priv,
+	vm_map_t		map,
+	mach_vm_offset_t	start,
+	mach_vm_size_t	size,
+	vm_prot_t		access,
+	vm_tag_t		tag)
+{
+	vm_map_offset_t		wire_start;
+	vm_map_offset_t		wire_end;
+
+	if (HOST_PRIV_NULL == host_priv) {
+		return KERN_INVALID_HOST;
+	}
+	if (VM_MAP_NULL == map) {
+		return KERN_INVALID_TASK;
+	}
+	/* reject bogus protection bits and ranges that wrap around */
+	if ((access & ~VM_PROT_ALL) || (start + size < start)) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/* expand the range to whole map pages */
+	wire_start = vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map));
+	wire_end = vm_map_round_page(start + size, VM_MAP_PAGE_MASK(map));
+
+	if (access != VM_PROT_NONE) {
+		return vm_map_wire_kernel(map, wire_start, wire_end,
+		           access, tag, TRUE);
+	}
+	/* VM_PROT_NONE requests an unwire */
+	return vm_map_unwire(map, wire_start, wire_end, TRUE);
+}
+
+/*
+ * vm_wire -
+ * Specify that the range of the virtual address space
+ * of the target task must not cause page faults for
+ * the indicated accesses.
+ *
+ * [ To unwire the pages, specify VM_PROT_NONE. ]
+ */
+kern_return_t
+vm_wire(
+	host_priv_t		host_priv,
+	vm_map_t		map,
+	vm_offset_t		start,
+	vm_size_t		size,
+	vm_prot_t		access)
+{
+	vm_map_offset_t		wire_start;
+	vm_map_offset_t		wire_end;
+
+	if (HOST_PRIV_NULL == host_priv) {
+		return KERN_INVALID_HOST;
+	}
+	if (VM_MAP_NULL == map) {
+		return KERN_INVALID_TASK;
+	}
+	/* reject bogus protection bits and ranges that wrap around */
+	if ((access & ~VM_PROT_ALL) || (start + size < start)) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/* an empty range is trivially wired */
+	if (size == 0) {
+		return KERN_SUCCESS;
+	}
+
+	/* expand the range to whole map pages */
+	wire_start = vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map));
+	wire_end = vm_map_round_page(start + size, VM_MAP_PAGE_MASK(map));
+
+	if (access != VM_PROT_NONE) {
+		return vm_map_wire_kernel(map, wire_start, wire_end,
+		           access, VM_KERN_MEMORY_OSFMK, TRUE);
+	}
+	/* VM_PROT_NONE requests an unwire */
+	return vm_map_unwire(map, wire_start, wire_end, TRUE);
+}
+
+/*
+ * vm_msync
+ *
+ * Synchronises the memory range specified with its backing store
+ * image by either flushing or cleaning the contents to the appropriate
+ * memory manager.
+ *
+ * interpretation of sync_flags
+ * VM_SYNC_INVALIDATE - discard pages, only return precious
+ * pages to manager.
+ *
+ * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
+ * - discard pages, write dirty or precious