#include <mach_vm_debug.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
-#include <mach/vm_map_server.h>
#include <mach_debug/vm_info.h>
#include <mach_debug/page_info.h>
#include <mach_debug/hash_info.h>
#define __DEBUG_ONLY
#endif /* !MACH_VM_DEBUG */
+#if VM32_SUPPORT
+
+#include <mach/vm32_map_server.h>
+#include <mach/vm_map.h>
+
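+/*
+ * These debug interfaces predate the 64-bit VM API and report
+ * natural_t-sized fields, so they are built only when 32-bit
+ * userspace support (VM32_SUPPORT) is configured.
+ */
+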
/*
- * Routine: mach_vm_region_info [kernel call]
+ * Routine: vm32_region_info [kernel call]
* Purpose:
*/
kern_return_t
-mach_vm_region_info(
+vm32_region_info(
__DEBUG_ONLY vm_map_t map,
- __DEBUG_ONLY vm_offset_t address,
+ __DEBUG_ONLY vm32_offset_t address,
__DEBUG_ONLY vm_info_region_t *regionp,
__DEBUG_ONLY vm_info_object_array_t *objectsp,
__DEBUG_ONLY mach_msg_type_number_t *objectsCntp)
/* cmap is read-locked; we have a real entry */
object = entry->object.vm_object;
- region.vir_start = entry->vme_start;
- region.vir_end = entry->vme_end;
- region.vir_object = (vm_offset_t) object;
- region.vir_offset = entry->offset;
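+ /*
+  * vm_info_region_t carries 32-bit natural_t fields, so 64-bit
+  * map addresses and offsets are deliberately truncated here.
+  */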
+ region.vir_start = (natural_t) entry->vme_start;
+ region.vir_end = (natural_t) entry->vme_end;
+ region.vir_object = (natural_t)(uintptr_t) object;
+ region.vir_offset = (natural_t) entry->offset;
region.vir_needs_copy = entry->needs_copy;
region.vir_protection = entry->protection;
region.vir_max_protection = entry->max_protection;
region.vir_user_wired_count = entry->user_wired_count;
used = 0;
- room = size / sizeof(vm_info_object_t);
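+ /* Number of vm_info_object_t slots the wired buffer can hold. */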
+ room = (unsigned int) (size / sizeof(vm_info_object_t));
if (object == VM_OBJECT_NULL) {
vm_map_unlock_read(cmap);
&((vm_info_object_t *) addr)[used];
vio->vio_object =
- (vm_offset_t) cobject;
+ (natural_t)(uintptr_t) cobject;
vio->vio_size =
- cobject->size;
+ (natural_t) cobject->vo_size;
vio->vio_ref_count =
cobject->ref_count;
vio->vio_resident_page_count =
cobject->resident_page_count;
vio->vio_copy =
- (vm_offset_t) cobject->copy;
+ (natural_t)(uintptr_t) cobject->copy;
vio->vio_shadow =
- (vm_offset_t) cobject->shadow;
+ (natural_t)(uintptr_t) cobject->shadow;
vio->vio_shadow_offset =
- cobject->shadow_offset;
+ (natural_t) cobject->vo_shadow_offset;
vio->vio_paging_offset =
- cobject->paging_offset;
+ (natural_t) cobject->paging_offset;
vio->vio_copy_strategy =
cobject->copy_strategy;
vio->vio_last_alloc =
- cobject->last_alloc;
+ (vm_offset_t) cobject->last_alloc;
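+ /*
+  * The legacy interface has a single in-progress field; report
+  * the sum of the paging and activity counters.
+  */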
vio->vio_paging_in_progress =
- cobject->paging_in_progress;
+ cobject->paging_in_progress +
+ cobject->activity_in_progress;
vio->vio_pager_created =
cobject->pager_created;
vio->vio_pager_initialized =
if (size != 0)
kmem_free(ipc_kernel_map, addr, size);
- size = round_page_32(2 * used * sizeof(vm_info_object_t));
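+ /*
+  * Too small: allocate twice the space this pass needed, rounded
+  * to the kernel map's page size, and retry.
+  */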
+ size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
+ VM_MAP_PAGE_MASK(ipc_kernel_map));
kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
if (kr != KERN_SUCCESS)
return KERN_RESOURCE_SHORTAGE;
- kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
- vm_map_round_page(addr + size),
- VM_PROT_READ|VM_PROT_WRITE, FALSE);
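+ /*
+  * Wire the buffer so it can be filled without faulting while
+  * the target map is read-locked.
+  */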
+ kr = vm_map_wire(
+ ipc_kernel_map,
+ vm_map_trunc_page(addr,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ vm_map_round_page(addr + size,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ VM_PROT_READ|VM_PROT_WRITE,
+ FALSE);
assert(kr == KERN_SUCCESS);
}
kmem_free(ipc_kernel_map, addr, size);
} else {
vm_size_t size_used =
- round_page_32(used * sizeof(vm_info_object_t));
-
- kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
- vm_map_round_page(addr + size_used), FALSE);
+ vm_map_round_page(used * sizeof(vm_info_object_t),
+ VM_MAP_PAGE_MASK(ipc_kernel_map));
+
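+ /*
+  * Unwire the pages actually used before handing them to
+  * vm_map_copyin() for the out-of-line reply.
+  */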
+ kr = vm_map_unwire(
+ ipc_kernel_map,
+ vm_map_trunc_page(addr,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ vm_map_round_page(addr + size_used,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ FALSE);
assert(kr == KERN_SUCCESS);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
*/
kern_return_t
-mach_vm_region_info_64(
+vm32_region_info_64(
__DEBUG_ONLY vm_map_t map,
- __DEBUG_ONLY vm_offset_t address,
+ __DEBUG_ONLY vm32_offset_t address,
__DEBUG_ONLY vm_info_region_64_t *regionp,
__DEBUG_ONLY vm_info_object_array_t *objectsp,
__DEBUG_ONLY mach_msg_type_number_t *objectsCntp)
/* cmap is read-locked; we have a real entry */
object = entry->object.vm_object;
- region.vir_start = entry->vme_start;
- region.vir_end = entry->vme_end;
- region.vir_object = (vm_offset_t) object;
+ region.vir_start = (natural_t) entry->vme_start;
+ region.vir_end = (natural_t) entry->vme_end;
+ region.vir_object = (natural_t)(uintptr_t) object;
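+ /*
+  * Unlike the 32-bit variant above, vm_info_region_64_t keeps
+  * vir_offset at full width, so no truncating cast is needed.
+  */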
region.vir_offset = entry->offset;
region.vir_needs_copy = entry->needs_copy;
region.vir_protection = entry->protection;
region.vir_user_wired_count = entry->user_wired_count;
used = 0;
- room = size / sizeof(vm_info_object_t);
+ room = (unsigned int) (size / sizeof(vm_info_object_t));
if (object == VM_OBJECT_NULL) {
vm_map_unlock_read(cmap);
&((vm_info_object_t *) addr)[used];
vio->vio_object =
- (vm_offset_t) cobject;
+ (natural_t)(uintptr_t) cobject;
vio->vio_size =
- cobject->size;
+ (natural_t) cobject->vo_size;
vio->vio_ref_count =
cobject->ref_count;
vio->vio_resident_page_count =
cobject->resident_page_count;
vio->vio_copy =
- (vm_offset_t) cobject->copy;
+ (natural_t)(uintptr_t) cobject->copy;
vio->vio_shadow =
- (vm_offset_t) cobject->shadow;
+ (natural_t)(uintptr_t) cobject->shadow;
vio->vio_shadow_offset =
- cobject->shadow_offset;
+ (natural_t) cobject->vo_shadow_offset;
vio->vio_paging_offset =
- cobject->paging_offset;
+ (natural_t) cobject->paging_offset;
vio->vio_copy_strategy =
cobject->copy_strategy;
vio->vio_last_alloc =
- cobject->last_alloc;
+ (vm_offset_t) cobject->last_alloc;
vio->vio_paging_in_progress =
- cobject->paging_in_progress;
+ cobject->paging_in_progress +
+ cobject->activity_in_progress;
vio->vio_pager_created =
cobject->pager_created;
vio->vio_pager_initialized =
if (size != 0)
kmem_free(ipc_kernel_map, addr, size);
- size = round_page_32(2 * used * sizeof(vm_info_object_t));
+ size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
+ VM_MAP_PAGE_MASK(ipc_kernel_map));
kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
if (kr != KERN_SUCCESS)
return KERN_RESOURCE_SHORTAGE;
- kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
- vm_map_round_page(addr + size),
- VM_PROT_READ|VM_PROT_WRITE, FALSE);
+ kr = vm_map_wire(
+ ipc_kernel_map,
+ vm_map_trunc_page(addr,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ vm_map_round_page(addr + size,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ VM_PROT_READ|VM_PROT_WRITE,
+ FALSE);
assert(kr == KERN_SUCCESS);
}
kmem_free(ipc_kernel_map, addr, size);
} else {
vm_size_t size_used =
- round_page_32(used * sizeof(vm_info_object_t));
-
- kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
- vm_map_round_page(addr + size_used), FALSE);
+ vm_map_round_page(used * sizeof(vm_info_object_t),
+ VM_MAP_PAGE_MASK(ipc_kernel_map));
+
+ kr = vm_map_unwire(
+ ipc_kernel_map,
+ vm_map_trunc_page(addr,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ vm_map_round_page(addr + size_used,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ FALSE);
assert(kr == KERN_SUCCESS);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
* Return an array of virtual pages that are mapped to a task.
*/
kern_return_t
-vm_mapped_pages_info(
+vm32_mapped_pages_info(
__DEBUG_ONLY vm_map_t map,
__DEBUG_ONLY page_address_array_t *pages,
__DEBUG_ONLY mach_msg_type_number_t *pages_count)
pmap = map->pmap;
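+ /*
+  * Size the buffer from the pmap's current resident count; the
+  * loop below resizes and retries if that estimate is stale.
+  */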
size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
- size = round_page_32(size);
+ size = vm_map_round_page(size,
+ VM_MAP_PAGE_MASK(ipc_kernel_map));
for (;;) {
(void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
- (void) vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
- vm_map_round_page(addr + size), FALSE);
+ (void) vm_map_unwire(
+ ipc_kernel_map,
+ vm_map_trunc_page(addr,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ vm_map_round_page(addr + size,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ FALSE);
list = (page_address_array_t) addr;
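+ /* Capacity of the buffer, in page-address entries. */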
- space = size / sizeof(vm_offset_t);
+ space = (unsigned int) (size / sizeof(vm_offset_t));
actual = pmap_list_resident_pages(pmap,
list,
/*
- * Try again, doubling the size
+ * Try again, with room for every page the pmap reported.
*/
- size = round_page_32(actual * sizeof(vm_offset_t));
+ size = vm_map_round_page(actual * sizeof(vm_offset_t),
+ VM_MAP_PAGE_MASK(ipc_kernel_map));
}
if (actual == 0) {
*pages = 0;
}
else {
*pages_count = actual;
- size_used = round_page_32(actual * sizeof(vm_offset_t));
- (void) vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
- vm_map_round_page(addr + size),
- VM_PROT_READ|VM_PROT_WRITE, FALSE);
+ size_used = vm_map_round_page(actual * sizeof(vm_offset_t),
+ VM_MAP_PAGE_MASK(ipc_kernel_map));
+ (void) vm_map_wire(
+ ipc_kernel_map,
+ vm_map_trunc_page(addr,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ vm_map_round_page(addr + size,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ VM_PROT_READ|VM_PROT_WRITE,
+ FALSE);
(void) vm_map_copyin(ipc_kernel_map,
(vm_map_address_t)addr,
(vm_map_size_t)size_used,
#endif /* MACH_VM_DEBUG */
}
+#endif /* VM32_SUPPORT */
+
/*
* Routine: host_virtual_physical_table_info
* Purpose:
if (info != *infop)
kmem_free(ipc_kernel_map, addr, size);
- size = round_page_32(actual * sizeof *info);
+ size = vm_map_round_page(actual * sizeof *info,
+ VM_MAP_PAGE_MASK(ipc_kernel_map));
kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
if (kr != KERN_SUCCESS)
return KERN_RESOURCE_SHORTAGE;
info = (hash_info_bucket_t *) addr;
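+ /* Number of hash_info_bucket_t entries the new buffer can hold. */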
- potential = size/sizeof *info;
+ potential = (unsigned int) (size/sizeof (*info));
}
if (info == *infop) {
vm_map_copy_t copy;
vm_size_t used;
- used = round_page_32(actual * sizeof *info);
+ used = vm_map_round_page(actual * sizeof *info,
+ VM_MAP_PAGE_MASK(ipc_kernel_map));
if (used != size)
kmem_free(ipc_kernel_map, addr + used, size - used);