+ map_addr = (vm_map_offset_t)*address;
+
+ kr = vm_map_remap(target_map,
+ &map_addr,
+ size,
+ mask,
+ flags,
+ VM_MAP_KERNEL_FLAGS_NONE,
+ tag,
+ src_map,
+ memory_address,
+ copy,
+ cur_protection,
+ max_protection,
+ inheritance);
+ *address = CAST_DOWN(vm_offset_t, map_addr);
+ return kr;
+}
+
+/*
+ * NOTE: these routines (and this file) will no longer require mach_host_server.h
+ * when mach_vm_wire and vm_wire are changed to use ledgers.
+ */
+#include <mach/mach_host_server.h>
+/*
+ * mach_vm_wire
+ * Specify that the range of the virtual address space
+ * of the target task must not cause page faults for
+ * the indicated accesses.
+ *
+ * [ To unwire the pages, specify VM_PROT_NONE. ]
+ */
+kern_return_t
+mach_vm_wire_external(
+ host_priv_t host_priv,
+ vm_map_t map,
+ mach_vm_offset_t start,
+ mach_vm_size_t size,
+ vm_prot_t access)
+{
+ return mach_vm_wire_kernel(host_priv, map, start, size, access, VM_KERN_MEMORY_MLOCK);
+}
+
+kern_return_t
+mach_vm_wire_kernel(
+ host_priv_t host_priv,
+ vm_map_t map,
+ mach_vm_offset_t start,
+ mach_vm_size_t size,
+ vm_prot_t access,
+ vm_tag_t tag)
+{
+ kern_return_t rc;
+
+ if (host_priv == HOST_PRIV_NULL) {
+ return KERN_INVALID_HOST;
+ }
+
+ assert(host_priv == &realhost);
+
+ if (map == VM_MAP_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
+ if ((access & ~VM_PROT_ALL) || (start + size < start)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (access != VM_PROT_NONE) {
+ rc = vm_map_wire_kernel(map,
+ vm_map_trunc_page(start,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(start + size,
+ VM_MAP_PAGE_MASK(map)),
+ access, tag,
+ TRUE);
+ } else {
+ rc = vm_map_unwire(map,
+ vm_map_trunc_page(start,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(start + size,
+ VM_MAP_PAGE_MASK(map)),
+ TRUE);
+ }
+ return rc;
+}
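+
+/*
+ * Illustrative sketch (not part of this file): a privileged user-space
+ * caller reaching the MIG wire routine served by mach_vm_wire_external()
+ * above. The header names and the way host_priv is obtained are
+ * assumptions, not something this file defines.
+ *
+ *	#include <mach/mach.h>
+ *
+ *	static kern_return_t
+ *	wire_buffer(host_priv_t host_priv, void *buf, size_t len)
+ *	{
+ *		// Wire for read/write; pass VM_PROT_NONE later to unwire.
+ *		return mach_vm_wire(host_priv, mach_task_self(),
+ *		    (mach_vm_address_t)buf, (mach_vm_size_t)len,
+ *		    VM_PROT_READ | VM_PROT_WRITE);
+ *	}
+ */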
+
+/*
+ * vm_wire -
+ * Specify that the range of the virtual address space
+ * of the target task must not cause page faults for
+ * the indicated accesses.
+ *
+ * [ To unwire the pages, specify VM_PROT_NONE. ]
+ */
+kern_return_t
+vm_wire(
+ host_priv_t host_priv,
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size,
+ vm_prot_t access)
+{
+ kern_return_t rc;
+
+ if (host_priv == HOST_PRIV_NULL) {
+ return KERN_INVALID_HOST;
+ }
+
+ assert(host_priv == &realhost);
+
+ if (map == VM_MAP_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
+ if ((access & ~VM_PROT_ALL) || (start + size < start)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (size == 0) {
+ rc = KERN_SUCCESS;
+ } else if (access != VM_PROT_NONE) {
+ rc = vm_map_wire_kernel(map,
+ vm_map_trunc_page(start,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(start + size,
+ VM_MAP_PAGE_MASK(map)),
+ access, VM_KERN_MEMORY_OSFMK,
+ TRUE);
+ } else {
+ rc = vm_map_unwire(map,
+ vm_map_trunc_page(start,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(start + size,
+ VM_MAP_PAGE_MASK(map)),
+ TRUE);
+ }
+ return rc;
+}
+
+/*
+ * mach_vm_msync
+ *
+ * Synchronises the memory range specified with its backing store
+ * image by either flushing or cleaning the contents to the appropriate
+ * memory manager.
+ *
+ * interpretation of sync_flags
+ * VM_SYNC_INVALIDATE - discard pages, only return precious
+ * pages to manager.
+ *
+ * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
+ * - discard pages, write dirty or precious
+ * pages back to memory manager.
+ *
+ * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
+ * - write dirty or precious pages back to
+ * the memory manager.
+ *
+ * VM_SYNC_CONTIGUOUS - does everything normally, but if there
+ * is a hole in the region, and we would
+ * have returned KERN_SUCCESS, return
+ * KERN_INVALID_ADDRESS instead.
+ *
+ * RETURNS
+ * KERN_INVALID_TASK Bad task parameter
+ * KERN_INVALID_ARGUMENT both sync and async were specified.
+ * KERN_SUCCESS The usual.
+ * KERN_INVALID_ADDRESS There was a hole in the region.
+ */
+
+kern_return_t
+mach_vm_msync(
+ vm_map_t map,
+ mach_vm_address_t address,
+ mach_vm_size_t size,
+ vm_sync_t sync_flags)
+{
+ if (map == VM_MAP_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
+ return vm_map_msync(map, (vm_map_address_t)address,
+ (vm_map_size_t)size, sync_flags);
+}
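+
+/*
+ * Illustrative sketch (not part of this file): flushing a dirty range back
+ * to its pager synchronously via the MIG routine served above. The header
+ * names are assumptions.
+ *
+ *	#include <mach/mach.h>
+ *	#include <mach/mach_vm.h>
+ *
+ *	static kern_return_t
+ *	flush_range(mach_vm_address_t addr, mach_vm_size_t len)
+ *	{
+ *		// Write dirty/precious pages back and wait for completion.
+ *		return mach_vm_msync(mach_task_self(), addr, len,
+ *		    VM_SYNC_SYNCHRONOUS);
+ *	}
+ */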
+
+/*
+ * vm_msync
+ *
+ * Synchronises the memory range specified with its backing store
+ * image by either flushing or cleaning the contents to the appropriate
+ * memory manager.
+ *
+ * interpretation of sync_flags
+ * VM_SYNC_INVALIDATE - discard pages, only return precious
+ * pages to manager.
+ *
+ * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
+ * - discard pages, write dirty or precious
+ * pages back to memory manager.
+ *
+ * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
+ * - write dirty or precious pages back to
+ * the memory manager.
+ *
+ * VM_SYNC_CONTIGUOUS - does everything normally, but if there
+ * is a hole in the region, and we would
+ * have returned KERN_SUCCESS, return
+ * KERN_INVALID_ADDRESS instead.
+ *
+ * The addressability of the range is limited to that which can
+ * be described by a vm_address_t.
+ *
+ * RETURNS
+ * KERN_INVALID_TASK Bad task parameter
+ * KERN_INVALID_ARGUMENT both sync and async were specified.
+ * KERN_SUCCESS The usual.
+ * KERN_INVALID_ADDRESS There was a hole in the region.
+ */
+
+kern_return_t
+vm_msync(
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ vm_sync_t sync_flags)
+{
+ if (map == VM_MAP_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
+ return vm_map_msync(map, (vm_map_address_t)address,
+ (vm_map_size_t)size, sync_flags);
+}
+
+
+int
+vm_toggle_entry_reuse(int toggle, int *old_value)
+{
+ vm_map_t map = current_map();
+
+ assert(!map->is_nested_map);
+ if (toggle == VM_TOGGLE_GETVALUE && old_value != NULL) {
+ *old_value = map->disable_vmentry_reuse;
+ } else if (toggle == VM_TOGGLE_SET) {
+ vm_map_entry_t map_to_entry;
+
+ vm_map_lock(map);
+ vm_map_disable_hole_optimization(map);
+ map->disable_vmentry_reuse = TRUE;
+ __IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
+ if (map->first_free == map_to_entry) {
+ map->highest_entry_end = vm_map_min(map);
+ } else {
+ map->highest_entry_end = map->first_free->vme_end;
+ }
+ vm_map_unlock(map);
+ } else if (toggle == VM_TOGGLE_CLEAR) {
+ vm_map_lock(map);
+ map->disable_vmentry_reuse = FALSE;
+ vm_map_unlock(map);
+ } else {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * mach_vm_behavior_set
+ *
+ * Sets the paging behavior attribute for the specified range
+ * in the specified map.
+ *
+ * This routine will fail with KERN_INVALID_ADDRESS if any address
+ * in [start,start+size) is not a valid allocated memory region.
+ */
+kern_return_t
+mach_vm_behavior_set(
+ vm_map_t map,
+ mach_vm_offset_t start,
+ mach_vm_size_t size,
+ vm_behavior_t new_behavior)
+{
+ vm_map_offset_t align_mask;
+
+ if ((map == VM_MAP_NULL) || (start + size < start)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (size == 0) {
+ return KERN_SUCCESS;
+ }
+
+ switch (new_behavior) {
+ case VM_BEHAVIOR_REUSABLE:
+ case VM_BEHAVIOR_REUSE:
+ case VM_BEHAVIOR_CAN_REUSE:
+ /*
+ * Align to the hardware page size, to allow
+ * malloc() to maximize the amount of re-usability,
+ * even on systems with larger software page size.
+ */
+ align_mask = PAGE_MASK;
+ break;
+ default:
+ align_mask = VM_MAP_PAGE_MASK(map);
+ break;
+ }
+
+ return vm_map_behavior_set(map,
+ vm_map_trunc_page(start, align_mask),
+ vm_map_round_page(start + size, align_mask),
+ new_behavior);
+}
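+
+/*
+ * Illustrative sketch (not part of this file): marking a page-aligned free
+ * block as reusable, the malloc()-style use case the alignment comment
+ * above refers to. The header names are assumptions.
+ *
+ *	#include <mach/mach.h>
+ *	#include <mach/mach_vm.h>
+ *
+ *	static kern_return_t
+ *	mark_reusable(mach_vm_address_t addr, mach_vm_size_t len)
+ *	{
+ *		// Tell the VM system the contents no longer matter, so the
+ *		// pages can be reclaimed without being swapped out.
+ *		return mach_vm_behavior_set(mach_task_self(), addr, len,
+ *		    VM_BEHAVIOR_REUSABLE);
+ *	}
+ */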
+
+/*
+ * vm_behavior_set
+ *
+ * Sets the paging behavior attribute for the specified range
+ * in the specified map.
+ *
+ * This routine will fail with KERN_INVALID_ADDRESS if any address
+ * in [start,start+size) is not a valid allocated memory region.
+ *
+ * This routine is potentially limited in addressability by the
+ * use of vm_offset_t (if the map provided is larger than the
+ * kernel's).
+ */
+kern_return_t
+vm_behavior_set(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size,
+ vm_behavior_t new_behavior)
+{
+ if (start + size < start) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return mach_vm_behavior_set(map,
+ (mach_vm_offset_t) start,
+ (mach_vm_size_t) size,
+ new_behavior);
+}
+
+/*
+ * mach_vm_region:
+ *
+ * User call to obtain information about a region in
+ * a task's address map. Currently, only one flavor is
+ * supported.
+ *
+ * XXX The reserved and behavior fields cannot be filled
+ * in until the vm merge from the IK is completed, and
+ * vm_reserve is implemented.
+ *
+ * XXX Dependency: syscall_vm_region() also supports only one flavor.
+ */
+
+kern_return_t
+mach_vm_region(
+ vm_map_t map,
+ mach_vm_offset_t *address, /* IN/OUT */
+ mach_vm_size_t *size, /* OUT */
+ vm_region_flavor_t flavor, /* IN */
+ vm_region_info_t info, /* OUT */
+ mach_msg_type_number_t *count, /* IN/OUT */
+ mach_port_t *object_name) /* OUT */
+{
+ vm_map_offset_t map_addr;
+ vm_map_size_t map_size;
+ kern_return_t kr;
+
+ if (VM_MAP_NULL == map) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ map_addr = (vm_map_offset_t)*address;
+ map_size = (vm_map_size_t)*size;
+
+ /* legacy conversion */
+ if (VM_REGION_BASIC_INFO == flavor) {
+ flavor = VM_REGION_BASIC_INFO_64;
+ }
+
+ kr = vm_map_region(map,
+ &map_addr, &map_size,
+ flavor, info, count,
+ object_name);
+
+ *address = map_addr;
+ *size = map_size;
+ return kr;
+}
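+
+/*
+ * Illustrative sketch (not part of this file): querying the region that
+ * contains a given address with the basic-info flavor. The header names
+ * are assumptions; object_name is returned as MACH_PORT_NULL by current
+ * kernels.
+ *
+ *	#include <mach/mach.h>
+ *	#include <mach/mach_vm.h>
+ *
+ *	static kern_return_t
+ *	region_protection(mach_vm_address_t addr, vm_prot_t *prot)
+ *	{
+ *		mach_vm_address_t address = addr;
+ *		mach_vm_size_t size = 0;
+ *		vm_region_basic_info_data_64_t info;
+ *		mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
+ *		mach_port_t object_name = MACH_PORT_NULL;
+ *		kern_return_t kr;
+ *
+ *		kr = mach_vm_region(mach_task_self(), &address, &size,
+ *		    VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info,
+ *		    &count, &object_name);
+ *		if (kr == KERN_SUCCESS) {
+ *			*prot = info.protection;
+ *		}
+ *		return kr;
+ *	}
+ */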
+
+/*
+ * vm_region_64 and vm_region:
+ *
+ * User call to obtain information about a region in
+ * a task's address map. Currently, only one flavor is
+ * supported.
+ *
+ * XXX The reserved and behavior fields cannot be filled
+ * in until the vm merge from the IK is completed, and
+ * vm_reserve is implemented.
+ *
+ * XXX Dependency: syscall_vm_region() also supports only one flavor.
+ */
+
+kern_return_t
+vm_region_64(
+ vm_map_t map,
+ vm_offset_t *address, /* IN/OUT */
+ vm_size_t *size, /* OUT */
+ vm_region_flavor_t flavor, /* IN */
+ vm_region_info_t info, /* OUT */
+ mach_msg_type_number_t *count, /* IN/OUT */
+ mach_port_t *object_name) /* OUT */
+{
+ vm_map_offset_t map_addr;
+ vm_map_size_t map_size;
+ kern_return_t kr;
+
+ if (VM_MAP_NULL == map) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ map_addr = (vm_map_offset_t)*address;
+ map_size = (vm_map_size_t)*size;
+
+ /* legacy conversion */
+ if (VM_REGION_BASIC_INFO == flavor) {
+ flavor = VM_REGION_BASIC_INFO_64;
+ }
+
+ kr = vm_map_region(map,
+ &map_addr, &map_size,
+ flavor, info, count,
+ object_name);
+
+ *address = CAST_DOWN(vm_offset_t, map_addr);
+ *size = CAST_DOWN(vm_size_t, map_size);
+
+ if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
+ return KERN_INVALID_ADDRESS;
+ }
+ return kr;
+}
+
+kern_return_t
+vm_region(
+ vm_map_t map,
+ vm_address_t *address, /* IN/OUT */
+ vm_size_t *size, /* OUT */
+ vm_region_flavor_t flavor, /* IN */
+ vm_region_info_t info, /* OUT */
+ mach_msg_type_number_t *count, /* IN/OUT */
+ mach_port_t *object_name) /* OUT */
+{
+ vm_map_address_t map_addr;
+ vm_map_size_t map_size;
+ kern_return_t kr;
+
+ if (VM_MAP_NULL == map) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ map_addr = (vm_map_address_t)*address;
+ map_size = (vm_map_size_t)*size;
+
+ kr = vm_map_region(map,
+ &map_addr, &map_size,
+ flavor, info, count,
+ object_name);
+
+ *address = CAST_DOWN(vm_address_t, map_addr);
+ *size = CAST_DOWN(vm_size_t, map_size);
+
+ if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
+ return KERN_INVALID_ADDRESS;
+ }
+ return kr;
+}
+
+/*
+ * mach_vm_region_recurse: A form of mach_vm_region which follows the
+ * submaps in a target map
+ *
+ */
+kern_return_t
+mach_vm_region_recurse(
+ vm_map_t map,
+ mach_vm_address_t *address,
+ mach_vm_size_t *size,
+ uint32_t *depth,
+ vm_region_recurse_info_t info,
+ mach_msg_type_number_t *infoCnt)
+{
+ vm_map_address_t map_addr;
+ vm_map_size_t map_size;
+ kern_return_t kr;
+
+ if (VM_MAP_NULL == map) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ map_addr = (vm_map_address_t)*address;
+ map_size = (vm_map_size_t)*size;
+
+ kr = vm_map_region_recurse_64(
+ map,
+ &map_addr,
+ &map_size,
+ depth,
+ (vm_region_submap_info_64_t)info,
+ infoCnt);
+
+ *address = map_addr;
+ *size = map_size;
+ return kr;
+}
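+
+/*
+ * Illustrative sketch (not part of this file): the recursive variant,
+ * which descends into submaps up to the requested nesting depth. The
+ * header names are assumptions.
+ *
+ *	#include <mach/mach.h>
+ *	#include <mach/mach_vm.h>
+ *
+ *	static kern_return_t
+ *	region_user_tag(mach_vm_address_t addr, unsigned int *tag)
+ *	{
+ *		mach_vm_address_t address = addr;
+ *		mach_vm_size_t size = 0;
+ *		natural_t depth = 16;	// maximum submap nesting to follow
+ *		vm_region_submap_info_data_64_t info;
+ *		mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+ *		kern_return_t kr;
+ *
+ *		kr = mach_vm_region_recurse(mach_task_self(), &address, &size,
+ *		    &depth, (vm_region_recurse_info_t)&info, &count);
+ *		if (kr == KERN_SUCCESS) {
+ *			*tag = info.user_tag;
+ *		}
+ *		return kr;
+ *	}
+ */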
+
+/*
+ * vm_region_recurse: A form of vm_region which follows the
+ * submaps in a target map
+ *
+ */
+kern_return_t
+vm_region_recurse_64(
+ vm_map_t map,
+ vm_address_t *address,
+ vm_size_t *size,
+ uint32_t *depth,
+ vm_region_recurse_info_64_t info,
+ mach_msg_type_number_t *infoCnt)
+{
+ vm_map_address_t map_addr;
+ vm_map_size_t map_size;
+ kern_return_t kr;
+
+ if (VM_MAP_NULL == map) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ map_addr = (vm_map_address_t)*address;
+ map_size = (vm_map_size_t)*size;
+
+ kr = vm_map_region_recurse_64(
+ map,
+ &map_addr,
+ &map_size,
+ depth,
+ (vm_region_submap_info_64_t)info,
+ infoCnt);
+
+ *address = CAST_DOWN(vm_address_t, map_addr);
+ *size = CAST_DOWN(vm_size_t, map_size);
+
+ if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
+ return KERN_INVALID_ADDRESS;
+ }
+ return kr;
+}
+
+kern_return_t
+vm_region_recurse(
+ vm_map_t map,
+ vm_offset_t *address, /* IN/OUT */
+ vm_size_t *size, /* OUT */
+ natural_t *depth, /* IN/OUT */
+ vm_region_recurse_info_t info32, /* IN/OUT */
+ mach_msg_type_number_t *infoCnt) /* IN/OUT */
+{
+ vm_region_submap_info_data_64_t info64;
+ vm_region_submap_info_t info;
+ vm_map_address_t map_addr;
+ vm_map_size_t map_size;
+ kern_return_t kr;
+
+ if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+
+ map_addr = (vm_map_address_t)*address;
+ map_size = (vm_map_size_t)*size;
+ info = (vm_region_submap_info_t)info32;
+ *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;
+
+ kr = vm_map_region_recurse_64(map, &map_addr, &map_size,
+ depth, &info64, infoCnt);
+
+ info->protection = info64.protection;
+ info->max_protection = info64.max_protection;
+ info->inheritance = info64.inheritance;
+ info->offset = (uint32_t)info64.offset; /* trouble-maker */
+ info->user_tag = info64.user_tag;
+ info->pages_resident = info64.pages_resident;
+ info->pages_shared_now_private = info64.pages_shared_now_private;
+ info->pages_swapped_out = info64.pages_swapped_out;
+ info->pages_dirtied = info64.pages_dirtied;
+ info->ref_count = info64.ref_count;
+ info->shadow_depth = info64.shadow_depth;
+ info->external_pager = info64.external_pager;
+ info->share_mode = info64.share_mode;
+ info->is_submap = info64.is_submap;
+ info->behavior = info64.behavior;
+ info->object_id = info64.object_id;
+ info->user_wired_count = info64.user_wired_count;
+
+ *address = CAST_DOWN(vm_address_t, map_addr);
+ *size = CAST_DOWN(vm_size_t, map_size);
+ *infoCnt = VM_REGION_SUBMAP_INFO_COUNT;
+
+ if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) {
+ return KERN_INVALID_ADDRESS;
+ }
+ return kr;
+}
+
+kern_return_t
+mach_vm_purgable_control(
+ vm_map_t map,
+ mach_vm_offset_t address,
+ vm_purgable_t control,
+ int *state)
+{
+ if (VM_MAP_NULL == map) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
+ /* not allowed from user-space */
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return vm_map_purgable_control(map,
+ vm_map_trunc_page(address, PAGE_MASK),
+ control,
+ state);
+}
+
+kern_return_t
+vm_purgable_control(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_purgable_t control,
+ int *state)
+{
+ if (VM_MAP_NULL == map) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
+ /* not allowed from user-space */
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return vm_map_purgable_control(map,
+ vm_map_trunc_page(address, PAGE_MASK),
+ control,
+ state);
+}
+
+
+/*
+ * The right to allocate CPM can be restricted to privileged
+ * applications (those that can gain access to the host priv
+ * port). Set this variable to a non-zero value to enforce that
+ * restriction; when it is zero (the default), any application
+ * may allocate CPM.
+ */
+unsigned int vm_allocate_cpm_privileged = 0;
+
+/*
+ * Allocate memory in the specified map, with the caveat that
+ * the memory is physically contiguous. This call may fail
+ * if the system can't find sufficient contiguous memory.
+ * This call may cause or lead to heart-stopping amounts of
+ * paging activity.
+ *
+ * Memory obtained from this call should be freed in the
+ * normal way, viz., via vm_deallocate.
+ */
+kern_return_t
+vm_allocate_cpm(
+ host_priv_t host_priv,
+ vm_map_t map,
+ vm_address_t *addr,
+ vm_size_t size,
+ int flags)
+{
+ vm_map_address_t map_addr;
+ vm_map_size_t map_size;
+ kern_return_t kr;
+
+ if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv) {
+ return KERN_INVALID_HOST;
+ }
+
+ if (VM_MAP_NULL == map) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ map_addr = (vm_map_address_t)*addr;
+ map_size = (vm_map_size_t)size;
+
+ kr = vm_map_enter_cpm(map,
+ &map_addr,
+ map_size,
+ flags);
+
+ *addr = CAST_DOWN(vm_address_t, map_addr);
+ return kr;
+}
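+
+/*
+ * Illustrative sketch (not part of this file): a caller asking for a
+ * physically contiguous buffer and releasing it the normal way, as the
+ * comment above prescribes. How host_priv is obtained, and the header
+ * names, are assumptions.
+ *
+ *	#include <mach/mach.h>
+ *
+ *	static kern_return_t
+ *	grab_contiguous(host_priv_t host_priv, vm_size_t size,
+ *	    vm_address_t *addr)
+ *	{
+ *		kern_return_t kr;
+ *
+ *		*addr = 0;
+ *		kr = vm_allocate_cpm(host_priv, mach_task_self(), addr,
+ *		    size, VM_FLAGS_ANYWHERE);
+ *		// When done, free with:
+ *		//	vm_deallocate(mach_task_self(), *addr, size);
+ *		return kr;
+ *	}
+ */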
+
+
+kern_return_t
+mach_vm_page_query(
+ vm_map_t map,
+ mach_vm_offset_t offset,
+ int *disposition,
+ int *ref_count)
+{
+ if (VM_MAP_NULL == map) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return vm_map_page_query_internal(
+ map,
+ vm_map_trunc_page(offset, PAGE_MASK),
+ disposition, ref_count);
+}
+
+kern_return_t
+vm_map_page_query(
+ vm_map_t map,
+ vm_offset_t offset,
+ int *disposition,
+ int *ref_count)
+{
+ if (VM_MAP_NULL == map) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return vm_map_page_query_internal(
+ map,
+ vm_map_trunc_page(offset, PAGE_MASK),
+ disposition, ref_count);
+}
+
+kern_return_t
+mach_vm_page_range_query(
+ vm_map_t map,
+ mach_vm_offset_t address,
+ mach_vm_size_t size,
+ mach_vm_address_t dispositions_addr,
+ mach_vm_size_t *dispositions_count)
+{
+ kern_return_t kr = KERN_SUCCESS;
+ int num_pages = 0, i = 0;
+ mach_vm_size_t curr_sz = 0, copy_sz = 0;
+ mach_vm_size_t disp_buf_req_size = 0, disp_buf_total_size = 0;
+ mach_msg_type_number_t count = 0;
+
+ void *info = NULL;
+ void *local_disp = NULL;
+ vm_map_size_t info_size = 0, local_disp_size = 0;
+ mach_vm_offset_t start = 0, end = 0;
+
+ if (map == VM_MAP_NULL || dispositions_count == NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ disp_buf_req_size = (*dispositions_count * sizeof(int));
+ start = mach_vm_trunc_page(address);
+ end = mach_vm_round_page(address + size);
+
+ if (end < start) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if ((end - start) < size) {
+ /*
+ * Aligned size is less than unaligned size.
+ */
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (disp_buf_req_size == 0 || (end == start)) {
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * For large requests, we will go through them
+ * MAX_PAGE_RANGE_QUERY chunk at a time.
+ */
+
+ curr_sz = MIN(end - start, MAX_PAGE_RANGE_QUERY);
+ num_pages = (int) (curr_sz >> PAGE_SHIFT);
+
+ info_size = num_pages * sizeof(vm_page_info_basic_data_t);
+ info = kalloc(info_size);
+
+ if (info == NULL) {
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ local_disp_size = num_pages * sizeof(int);
+ local_disp = kalloc(local_disp_size);
+
+ if (local_disp == NULL) {
+ kfree(info, info_size);
+ info = NULL;
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ while (size) {
+ count = VM_PAGE_INFO_BASIC_COUNT;
+ kr = vm_map_page_range_info_internal(
+ map,
+ start,
+ mach_vm_round_page(start + curr_sz),
+ VM_PAGE_INFO_BASIC,
+ (vm_page_info_t) info,
+ &count);
+
+ assert(kr == KERN_SUCCESS);
+
+ for (i = 0; i < num_pages; i++) {
+ ((int*)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition;
+ }
+
+ copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int) /* an int per page */);
+ kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz);
+
+ start += curr_sz;
+ disp_buf_req_size -= copy_sz;
+ disp_buf_total_size += copy_sz;
+
+ if (kr != 0) {
+ break;
+ }
+
+ if ((disp_buf_req_size == 0) || (curr_sz >= size)) {
+ /*
+ * We might have inspected the full range, or even
+ * more than requested, e.g. if the user passed in a
+ * non-page-aligned start/size and/or we descended
+ * into a submap. We are done here.
+ */
+
+ size = 0;
+ } else {
+ dispositions_addr += copy_sz;
+
+ size -= curr_sz;
+
+ curr_sz = MIN(mach_vm_round_page(size), MAX_PAGE_RANGE_QUERY);
+ num_pages = (int)(curr_sz >> PAGE_SHIFT);
+ }
+ }
+
+ *dispositions_count = disp_buf_total_size / sizeof(int);
+
+ kfree(local_disp, local_disp_size);
+ local_disp = NULL;
+
+ kfree(info, info_size);
+ info = NULL;
+
+ return kr;
+}
+
+kern_return_t
+mach_vm_page_info(
+ vm_map_t map,
+ mach_vm_address_t address,
+ vm_page_info_flavor_t flavor,
+ vm_page_info_t info,
+ mach_msg_type_number_t *count)
+{
+ kern_return_t kr;
+
+ if (map == VM_MAP_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ kr = vm_map_page_info(map, address, flavor, info, count);
+ return kr;
+}
+
+/* map a (whole) upl into an address space */
+kern_return_t
+vm_upl_map(
+ vm_map_t map,
+ upl_t upl,
+ vm_address_t *dst_addr)
+{
+ vm_map_offset_t map_addr;
+ kern_return_t kr;
+
+ if (VM_MAP_NULL == map) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ kr = vm_map_enter_upl(map, upl, &map_addr);
+ *dst_addr = CAST_DOWN(vm_address_t, map_addr);
+ return kr;
+}
+
+kern_return_t
+vm_upl_unmap(
+ vm_map_t map,
+ upl_t upl)
+{
+ if (VM_MAP_NULL == map) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return vm_map_remove_upl(map, upl);
+}
+
+/* Retrieve a upl for an object underlying an address range in a map */
+
+kern_return_t
+vm_map_get_upl(
+ vm_map_t map,
+ vm_map_offset_t map_offset,
+ upl_size_t *upl_size,
+ upl_t *upl,
+ upl_page_info_array_t page_list,
+ unsigned int *count,
+ upl_control_flags_t *flags,
+ vm_tag_t tag,
+ int force_data_sync)
+{
+ upl_control_flags_t map_flags;
+ kern_return_t kr;
+
+ if (VM_MAP_NULL == map) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ map_flags = *flags & ~UPL_NOZEROFILL;
+ if (force_data_sync) {
+ map_flags |= UPL_FORCE_DATA_SYNC;
+ }
+
+ kr = vm_map_create_upl(map,
+ map_offset,
+ upl_size,
+ upl,
+ page_list,
+ count,
+ &map_flags,
+ tag);
+
+ *flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
+ return kr;
+}
+
+#if CONFIG_EMBEDDED
+extern int proc_selfpid(void);
+extern char *proc_name_address(void *p);
+int cs_executable_mem_entry = 0;
+int log_executable_mem_entry = 0;
+#endif /* CONFIG_EMBEDDED */
+
+/*
+ * mach_make_memory_entry_64
+ *
+ * Think of it as a two-stage vm_remap() operation. First
+ * you get a handle to a range of memory. Second, you map
+ * that handle somewhere else, rather than doing it all at
+ * once (and without needing access to the whole other map).
+ */
+kern_return_t
+mach_make_memory_entry_64(
+ vm_map_t target_map,
+ memory_object_size_t *size,
+ memory_object_offset_t offset,
+ vm_prot_t permission,
+ ipc_port_t *object_handle,
+ ipc_port_t parent_handle)
+{
+ vm_named_entry_kernel_flags_t vmne_kflags;
+
+ if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_USER) {
+ /*
+ * Unknown flag: reject for forward compatibility.
+ */
+ return KERN_INVALID_VALUE;
+ }
+
+ vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
+ if (permission & MAP_MEM_LEDGER_TAGGED) {
+ vmne_kflags.vmnekf_ledger_tag = VM_LEDGER_TAG_DEFAULT;
+ }
+ return mach_make_memory_entry_internal(target_map,
+ size,
+ offset,
+ permission,
+ vmne_kflags,
+ object_handle,
+ parent_handle);
+}
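+
+/*
+ * Illustrative sketch (not part of this file): the two-stage remap the
+ * comment above describes, performed from user space. The header names
+ * and the helper name are assumptions; mach_vm_map()'s argument order
+ * follows its user-level declaration.
+ *
+ *	#include <mach/mach.h>
+ *	#include <mach/mach_vm.h>
+ *
+ *	static kern_return_t
+ *	share_range(task_t dst_task, mach_vm_address_t src_addr,
+ *	    memory_object_size_t len, mach_vm_address_t *dst_addr)
+ *	{
+ *		mach_port_t handle = MACH_PORT_NULL;
+ *		kern_return_t kr;
+ *
+ *		// Stage 1: get a handle on the source range.
+ *		kr = mach_make_memory_entry_64(mach_task_self(), &len,
+ *		    src_addr, VM_PROT_READ | VM_PROT_WRITE, &handle,
+ *		    MACH_PORT_NULL);
+ *		if (kr != KERN_SUCCESS) {
+ *			return kr;
+ *		}
+ *
+ *		// Stage 2: map that handle into the destination task.
+ *		*dst_addr = 0;
+ *		kr = mach_vm_map(dst_task, dst_addr, len, 0,
+ *		    VM_FLAGS_ANYWHERE, handle, 0, FALSE,
+ *		    VM_PROT_READ | VM_PROT_WRITE,
+ *		    VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
+ *		mach_port_deallocate(mach_task_self(), handle);
+ *		return kr;
+ *	}
+ */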
+
+kern_return_t
+mach_make_memory_entry_internal(
+ vm_map_t target_map,
+ memory_object_size_t *size,
+ memory_object_offset_t offset,
+ vm_prot_t permission,
+ vm_named_entry_kernel_flags_t vmne_kflags,
+ ipc_port_t *object_handle,
+ ipc_port_t parent_handle)
+{
+ vm_map_version_t version;
+ vm_named_entry_t parent_entry;
+ vm_named_entry_t user_entry;
+ ipc_port_t user_handle;
+ kern_return_t kr;
+ vm_map_t real_map;
+
+ /* needed for call to vm_map_lookup_locked */
+ boolean_t wired;
+ boolean_t iskernel;
+ vm_object_offset_t obj_off;
+ vm_prot_t prot;
+ struct vm_object_fault_info fault_info = {};
+ vm_object_t object;
+ vm_object_t shadow_object;
+
+ /* needed for direct map entry manipulation */
+ vm_map_entry_t map_entry;
+ vm_map_entry_t next_entry;
+ vm_map_t local_map;
+ vm_map_t original_map = target_map;
+ vm_map_size_t total_size, map_size;
+ vm_map_offset_t map_start, map_end;
+ vm_map_offset_t local_offset;
+ vm_object_size_t mappable_size;
+
+ /*
+ * Stash the offset in the page for use by vm_map_enter_mem_object()
+ * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
+ */
+ vm_object_offset_t offset_in_page;
+
+ unsigned int access;
+ vm_prot_t protections;
+ vm_prot_t original_protections, mask_protections;
+ unsigned int wimg_mode;
+
+ boolean_t force_shadow = FALSE;
+ boolean_t use_data_addr;
+ boolean_t use_4K_compat;
+#if VM_NAMED_ENTRY_LIST
+ int alias = -1;
+#endif /* VM_NAMED_ENTRY_LIST */
+
+ if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_ALL) {
+ /*
+ * Unknown flag: reject for forward compatibility.
+ */
+ return KERN_INVALID_VALUE;
+ }
+
+ if (IP_VALID(parent_handle) &&
+ ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
+ parent_entry = (vm_named_entry_t) ip_get_kobject(parent_handle);
+ } else {
+ parent_entry = NULL;
+ }
+
+ if (parent_entry && parent_entry->is_copy) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ original_protections = permission & VM_PROT_ALL;
+ protections = original_protections;
+ mask_protections = permission & VM_PROT_IS_MASK;
+ access = GET_MAP_MEM(permission);
+ use_data_addr = ((permission & MAP_MEM_USE_DATA_ADDR) != 0);
+ use_4K_compat = ((permission & MAP_MEM_4K_DATA_ADDR) != 0);
+
+ user_handle = IP_NULL;
+ user_entry = NULL;
+
+ map_start = vm_map_trunc_page(offset, PAGE_MASK);
+
+ if (permission & MAP_MEM_ONLY) {
+ boolean_t parent_is_object;
+
+ map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+ map_size = map_end - map_start;
+
+ if (use_data_addr || use_4K_compat || parent_entry == NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ parent_is_object = !parent_entry->is_sub_map;
+ object = parent_entry->backing.object;
+ if (parent_is_object && object != VM_OBJECT_NULL) {
+ wimg_mode = object->wimg_bits;
+ } else {
+ wimg_mode = VM_WIMG_USE_DEFAULT;
+ }
+ if ((access != GET_MAP_MEM(parent_entry->protection)) &&
+ !(parent_entry->protection & VM_PROT_WRITE)) {
+ return KERN_INVALID_RIGHT;
+ }
+ vm_prot_to_wimg(access, &wimg_mode);
+ if (access != MAP_MEM_NOOP) {
+ SET_MAP_MEM(access, parent_entry->protection);
+ }
+ if (parent_is_object && object &&
+ (access != MAP_MEM_NOOP) &&
+ (!(object->nophyscache))) {
+ if (object->wimg_bits != wimg_mode) {
+ vm_object_lock(object);
+ vm_object_change_wimg_mode(object, wimg_mode);
+ vm_object_unlock(object);
+ }
+ }
+ if (object_handle) {
+ *object_handle = IP_NULL;
+ }
+ return KERN_SUCCESS;
+ } else if (permission & MAP_MEM_NAMED_CREATE) {
+ int ledger_flags = 0;
+ task_t owner;
+
+ map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+ map_size = map_end - map_start;
+
+ if (use_data_addr || use_4K_compat) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ kr = mach_memory_entry_allocate(&user_entry, &user_handle);
+ if (kr != KERN_SUCCESS) {
+ return KERN_FAILURE;
+ }
+
+ /*
+ * Force the creation of the VM object now.
+ */
+ if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
+ /*
+ * LP64todo - for now, we can only allocate 4GB-4096
+ * internal objects because the default pager can't
+ * page bigger ones. Remove this when it can.
+ */
+ kr = KERN_FAILURE;
+ goto make_mem_done;
+ }
+
+ object = vm_object_allocate(map_size);
+ assert(object != VM_OBJECT_NULL);
+
+ /*
+ * XXX
+ * We use this path when we want to make sure that
+ * nobody messes with the object (coalesce, for
+ * example) before we map it.
+ * We might want to use these objects for transposition via
+ * vm_object_transpose() too, so we don't want any copy or
+ * shadow objects either...
+ */
+ object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+ object->true_share = TRUE;
+
+ owner = current_task();
+ if ((permission & MAP_MEM_PURGABLE) ||
+ vmne_kflags.vmnekf_ledger_tag) {
+ assert(object->vo_owner == NULL);
+ assert(object->resident_page_count == 0);
+ assert(object->wired_page_count == 0);
+ assert(owner != TASK_NULL);
+ if (vmne_kflags.vmnekf_ledger_no_footprint) {
+ ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
+ object->vo_no_footprint = TRUE;
+ }
+ if (permission & MAP_MEM_PURGABLE) {
+ if (!(permission & VM_PROT_WRITE)) {
+ /* if we can't write, we can't purge */
+ vm_object_deallocate(object);
+ kr = KERN_INVALID_ARGUMENT;
+ goto make_mem_done;
+ }
+ object->purgable = VM_PURGABLE_NONVOLATILE;
+ if (permission & MAP_MEM_PURGABLE_KERNEL_ONLY) {
+ object->purgeable_only_by_kernel = TRUE;
+ }
+#if __arm64__
+ if (owner->task_legacy_footprint) {
+ /*
+ * For ios11, we failed to account for
+ * this memory. Keep doing that for
+ * legacy apps (built before ios12),
+ * for backwards compatibility's sake...
+ */
+ owner = kernel_task;
+ }
+#endif /* __arm64__ */
+ vm_object_lock(object);
+ vm_purgeable_nonvolatile_enqueue(object, owner);
+ vm_object_unlock(object);
+ }
+ }
+
+ if (vmne_kflags.vmnekf_ledger_tag) {
+ /*
+ * Bill this object to the current task's
+ * ledgers for the given tag.
+ */
+ if (vmne_kflags.vmnekf_ledger_no_footprint) {
+ ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
+ }
+ vm_object_lock(object);
+ object->vo_ledger_tag = vmne_kflags.vmnekf_ledger_tag;
+ kr = vm_object_ownership_change(
+ object,
+ vmne_kflags.vmnekf_ledger_tag,
+ owner, /* new owner */
+ ledger_flags,
+ FALSE); /* task_objq locked? */
+ vm_object_unlock(object);
+ if (kr != KERN_SUCCESS) {
+ vm_object_deallocate(object);
+ goto make_mem_done;
+ }
+ }
+
+#if CONFIG_SECLUDED_MEMORY
+ if (secluded_for_iokit && /* global boot-arg */
+ ((permission & MAP_MEM_GRAB_SECLUDED)
+#if 11
+ /* XXX FBDP for my testing only */
+ || (secluded_for_fbdp && map_size == 97550336)
+#endif
+ )) {
+#if 11
+ if (!(permission & MAP_MEM_GRAB_SECLUDED) &&
+ secluded_for_fbdp) {
+ printf("FBDP: object %p size %lld can grab secluded\n", object, (uint64_t) map_size);
+ }
+#endif
+ object->can_grab_secluded = TRUE;
+ assert(!object->eligible_for_secluded);
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+ /*
+ * The VM object is brand new and nobody else knows about it,
+ * so we don't need to lock it.
+ */
+
+ wimg_mode = object->wimg_bits;
+ vm_prot_to_wimg(access, &wimg_mode);
+ if (access != MAP_MEM_NOOP) {
+ object->wimg_bits = wimg_mode;
+ }
+
+ /* the object has no pages, so no WIMG bits to update here */
+
+ user_entry->backing.object = object;
+ user_entry->internal = TRUE;
+ user_entry->is_sub_map = FALSE;
+ user_entry->offset = 0;
+ user_entry->data_offset = 0;
+ user_entry->protection = protections;
+ SET_MAP_MEM(access, user_entry->protection);
+ user_entry->size = map_size;
+
+ /* user_object pager and internal fields are not used */
+ /* when the object field is filled in. */
+
+ *size = CAST_DOWN(vm_size_t, (user_entry->size -
+ user_entry->data_offset));
+ *object_handle = user_handle;
+ return KERN_SUCCESS;
+ }
+
+ if (permission & MAP_MEM_VM_COPY) {
+ vm_map_copy_t copy;
+
+ if (target_map == VM_MAP_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
+ map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+ map_size = map_end - map_start;
+ if (use_data_addr || use_4K_compat) {
+ offset_in_page = offset - map_start;
+ if (use_4K_compat) {
+ offset_in_page &= ~((signed)(0xFFF));
+ }
+ } else {
+ offset_in_page = 0;
+ }
+
+ kr = vm_map_copyin_internal(target_map,
+ map_start,
+ map_size,
+ VM_MAP_COPYIN_ENTRY_LIST,
+ &copy);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+
+ kr = mach_memory_entry_allocate(&user_entry, &user_handle);
+ if (kr != KERN_SUCCESS) {
+ vm_map_copy_discard(copy);
+ return KERN_FAILURE;
+ }
+
+ user_entry->backing.copy = copy;
+ user_entry->internal = FALSE;
+ user_entry->is_sub_map = FALSE;
+ user_entry->is_copy = TRUE;
+ user_entry->offset = 0;
+ user_entry->protection = protections;
+ user_entry->size = map_size;
+ user_entry->data_offset = offset_in_page;
+
+ *size = CAST_DOWN(vm_size_t, (user_entry->size -
+ user_entry->data_offset));
+ *object_handle = user_handle;
+ return KERN_SUCCESS;
+ }
+
+ if (permission & MAP_MEM_VM_SHARE) {
+ vm_map_copy_t copy;
+ vm_prot_t cur_prot, max_prot;
+
+ if (target_map == VM_MAP_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
+ map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+ map_size = map_end - map_start;
+ if (use_data_addr || use_4K_compat) {
+ offset_in_page = offset - map_start;
+ if (use_4K_compat) {
+ offset_in_page &= ~((signed)(0xFFF));
+ }
+ } else {
+ offset_in_page = 0;
+ }
+
+ cur_prot = VM_PROT_ALL;
+ kr = vm_map_copy_extract(target_map,
+ map_start,
+ map_size,
+ &copy,
+ &cur_prot,
+ &max_prot);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+
+ if (mask_protections) {
+ /*
+ * We just want as much of "original_protections"
+ * as we can get out of the actual "cur_prot".
+ */
+ protections &= cur_prot;
+ if (protections == VM_PROT_NONE) {
+ /* no access at all: fail */
+ vm_map_copy_discard(copy);
+ return KERN_PROTECTION_FAILURE;
+ }
+ } else {
+ /*
+ * We want exactly "original_protections"
+ * out of "cur_prot".
+ */
+ if ((cur_prot & protections) != protections) {
+ vm_map_copy_discard(copy);
+ return KERN_PROTECTION_FAILURE;
+ }
+ }
+
+ kr = mach_memory_entry_allocate(&user_entry, &user_handle);
+ if (kr != KERN_SUCCESS) {
+ vm_map_copy_discard(copy);
+ return KERN_FAILURE;
+ }
+
+ user_entry->backing.copy = copy;
+ user_entry->internal = FALSE;
+ user_entry->is_sub_map = FALSE;
+ user_entry->is_copy = TRUE;
+ user_entry->offset = 0;
+ user_entry->protection = protections;
+ user_entry->size = map_size;
+ user_entry->data_offset = offset_in_page;
+
+ *size = CAST_DOWN(vm_size_t, (user_entry->size -
+ user_entry->data_offset));
+ *object_handle = user_handle;
+ return KERN_SUCCESS;
+ }
+
+ if (parent_entry == NULL ||
+ (permission & MAP_MEM_NAMED_REUSE)) {
+ map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+ map_size = map_end - map_start;
+ if (use_data_addr || use_4K_compat) {
+ offset_in_page = offset - map_start;
+ if (use_4K_compat) {
+ offset_in_page &= ~((signed)(0xFFF));
+ }
+ } else {
+ offset_in_page = 0;
+ }
+
+ /* Create a named object based on address range within the task map */
+ /* Go find the object at given address */
+
+ if (target_map == VM_MAP_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
+redo_lookup:
+ protections = original_protections;
+ vm_map_lock_read(target_map);
+
+ /* get the object associated with the target address */
+ /* note we check the permission of the range against */
+ /* that requested by the caller */
+
+ kr = vm_map_lookup_locked(&target_map, map_start,
+ protections | mask_protections,
+ OBJECT_LOCK_EXCLUSIVE, &version,
+ &object, &obj_off, &prot, &wired,
+ &fault_info,
+ &real_map);