X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/3e170ce000f1506b7b5d2c5c7faec85ceabb573d..5ba3f43ea354af8ad55bea84372a2bc834d8757c:/osfmk/vm/vm_debug.c

diff --git a/osfmk/vm/vm_debug.c b/osfmk/vm/vm_debug.c
index 95531ddad..12826e385 100644
--- a/osfmk/vm/vm_debug.c
+++ b/osfmk/vm/vm_debug.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -91,7 +91,7 @@
 #define __DEBUG_ONLY
 #endif	/* !MACH_VM_DEBUG */
 
-#if VM32_SUPPORT
+#ifdef VM32_SUPPORT
 
 #include <mach/vm32_map_server.h>
 #include <mach/vm_map.h>
@@ -122,7 +122,7 @@ vm32_region_info(
 	return KERN_FAILURE;
 #else
 	vm_map_copy_t copy;
-	vm_offset_t addr;		/* memory for OOL data */
+	vm_offset_t addr = 0;		/* memory for OOL data */
 	vm_size_t size;			/* size of the memory */
 	unsigned int room;		/* room for this many objects */
 	unsigned int used;		/* actually this many objects */
@@ -237,7 +237,7 @@ vm32_region_info(
 			vio->vio_internal =
 				cobject->internal;
 			vio->vio_temporary =
-				cobject->temporary;
+				FALSE;
 			vio->vio_alive =
 				cobject->alive;
 			vio->vio_purgable =
@@ -270,17 +270,18 @@ vm32_region_info(
 		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
 					 VM_MAP_PAGE_MASK(ipc_kernel_map));
 
-		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
+		kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
 		if (kr != KERN_SUCCESS)
 			return KERN_RESOURCE_SHORTAGE;
 
-		kr = vm_map_wire(
+		kr = vm_map_wire_kernel(
 			ipc_kernel_map,
 			vm_map_trunc_page(addr,
 					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
 			vm_map_round_page(addr + size,
 					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
 			VM_PROT_READ|VM_PROT_WRITE,
+			VM_KERN_MEMORY_IPC,
 			FALSE);
 		assert(kr == KERN_SUCCESS);
 	}
@@ -293,8 +294,8 @@ vm32_region_info(
 		if (size != 0)
 			kmem_free(ipc_kernel_map, addr, size);
 	} else {
-		vm_size_t size_used =
-			vm_map_round_page(used * sizeof(vm_info_object_t),
+		vm_size_t size_used = (used * sizeof(vm_info_object_t));
+		vm_size_t vmsize_used = vm_map_round_page(size_used,
 					  VM_MAP_PAGE_MASK(ipc_kernel_map));
 
 		kr = vm_map_unwire(
@@ -310,9 +311,9 @@ vm32_region_info(
 				   (vm_map_size_t)size_used, TRUE, &copy);
 		assert(kr == KERN_SUCCESS);
 
-		if (size != size_used)
+		if (size != vmsize_used)
 			kmem_free(ipc_kernel_map,
-				  addr + size_used, size - size_used);
+				  addr + vmsize_used, size - vmsize_used);
 	}
 
 	*regionp = region;
@@ -338,7 +339,7 @@ vm32_region_info_64(
 	return KERN_FAILURE;
 #else
 	vm_map_copy_t copy;
-	vm_offset_t addr;		/* memory for OOL data */
+	vm_offset_t addr = 0;		/* memory for OOL data */
 	vm_size_t size;			/* size of the memory */
 	unsigned int room;		/* room for this many objects */
 	unsigned int used;		/* actually this many objects */
@@ -451,7 +452,7 @@ vm32_region_info_64(
 			vio->vio_internal =
 				cobject->internal;
 			vio->vio_temporary =
-				cobject->temporary;
+				FALSE;
 			vio->vio_alive =
 				cobject->alive;
 			vio->vio_purgable =
@@ -484,17 +485,18 @@ vm32_region_info_64(
 		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
 					 VM_MAP_PAGE_MASK(ipc_kernel_map));
 
-		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
+		kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
 		if (kr != KERN_SUCCESS)
 			return KERN_RESOURCE_SHORTAGE;
 
-		kr = vm_map_wire(
+		kr = vm_map_wire_kernel(
 			ipc_kernel_map,
 			vm_map_trunc_page(addr,
 					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
 			vm_map_round_page(addr + size,
 					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
 			VM_PROT_READ|VM_PROT_WRITE,
+			VM_KERN_MEMORY_IPC,
 			FALSE);
 		assert(kr == KERN_SUCCESS);
 	}
@@ -507,8 +509,8 @@ vm32_region_info_64(
 		if (size != 0)
 			kmem_free(ipc_kernel_map, addr, size);
 	} else {
-		vm_size_t size_used =
-			vm_map_round_page(used * sizeof(vm_info_object_t),
+		vm_size_t size_used = (used * sizeof(vm_info_object_t));
+		vm_size_t vmsize_used = vm_map_round_page(size_used,
 					  VM_MAP_PAGE_MASK(ipc_kernel_map));
 
 		kr = vm_map_unwire(
@@ -524,9 +526,9 @@ vm32_region_info_64(
 				   (vm_map_size_t)size_used, TRUE, &copy);
 		assert(kr == KERN_SUCCESS);
 
-		if (size != size_used)
+		if (size != vmsize_used)
 			kmem_free(ipc_kernel_map,
-				  addr + size_used, size - size_used);
+				  addr + vmsize_used, size - vmsize_used);
 	}
 
 	*regionp = region;
@@ -551,7 +553,7 @@ vm32_mapped_pages_info(
 	vm_size_t size, size_used;
 	unsigned int actual, space;
 	page_address_array_t list;
-	vm_offset_t addr;
+	vm_offset_t addr = 0;
 
 	if (map == VM_MAP_NULL)
 		return (KERN_INVALID_ARGUMENT);
@@ -562,7 +564,7 @@ vm32_mapped_pages_info(
 			    VM_MAP_PAGE_MASK(ipc_kernel_map));
 
 	for (;;) {
-	    (void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
+	    (void) vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
 	    (void) vm_map_unwire(
 		    ipc_kernel_map,
 		    vm_map_trunc_page(addr,
@@ -597,26 +599,29 @@ vm32_mapped_pages_info(
 		(void) kmem_free(ipc_kernel_map, addr, size);
 	}
 	else {
+	    vm_size_t vmsize_used;
 	    *pages_count = actual;
-	    size_used = vm_map_round_page(actual * sizeof(vm_offset_t),
-					  VM_MAP_PAGE_MASK(ipc_kernel_map));
-	    (void) vm_map_wire(
+	    size_used = (actual * sizeof(vm_offset_t));
+	    vmsize_used = vm_map_round_page(size_used,
+					    VM_MAP_PAGE_MASK(ipc_kernel_map));
+	    (void) vm_map_wire_kernel(
 		    ipc_kernel_map,
 		    vm_map_trunc_page(addr,
 				      VM_MAP_PAGE_MASK(ipc_kernel_map)),
 		    vm_map_round_page(addr + size,
 				      VM_MAP_PAGE_MASK(ipc_kernel_map)),
 		    VM_PROT_READ|VM_PROT_WRITE,
+		    VM_KERN_MEMORY_IPC,
 		    FALSE);
 	    (void) vm_map_copyin(ipc_kernel_map,
 				 (vm_map_address_t)addr,
 				 (vm_map_size_t)size_used,
 				 TRUE,
 				 (vm_map_copy_t *)pages);
-	    if (size_used != size) {
+	    if (vmsize_used != size) {
 		(void) kmem_free(ipc_kernel_map,
-				 addr + size_used,
-				 size - size_used);
+				 addr + vmsize_used,
+				 size - vmsize_used);
 	    }
 	}
 
@@ -647,7 +652,7 @@ host_virtual_physical_table_info(
 #if !MACH_VM_DEBUG
 	return KERN_FAILURE;
 #else
-	vm_offset_t addr;
+	vm_offset_t addr = 0;
 	vm_size_t size = 0;
 	hash_info_bucket_t *info;
 	unsigned int potential, actual;
@@ -673,7 +678,8 @@ host_virtual_physical_table_info(
 
 		size = vm_map_round_page(actual * sizeof *info,
 					 VM_MAP_PAGE_MASK(ipc_kernel_map));
-		kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
+		kr = vm_allocate_kernel(ipc_kernel_map, &addr, size,
+					VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
 		if (kr != KERN_SUCCESS)
 			return KERN_RESOURCE_SHORTAGE;
 
@@ -691,13 +697,13 @@ host_virtual_physical_table_info(
 		*countp = 0;
 	} else {
 		vm_map_copy_t copy;
-		vm_size_t used;
+		vm_size_t used, vmused;
 
-		used = vm_map_round_page(actual * sizeof *info,
-					 VM_MAP_PAGE_MASK(ipc_kernel_map));
+		used = (actual * sizeof(*info));
+		vmused = vm_map_round_page(used, VM_MAP_PAGE_MASK(ipc_kernel_map));
 
-		if (used != size)
-			kmem_free(ipc_kernel_map, addr + used, size - used);
+		if (vmused != size)
+			kmem_free(ipc_kernel_map, addr + vmused, size - vmused);
 
 		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
 				   (vm_map_size_t)used, TRUE, &copy);
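
For context on the size bookkeeping this diff introduces: vm_map_copyin() is now handed the exact byte count (size_used / used), while a separate page-rounded value (vmsize_used / vmused) is compared against the original allocation so that only whole unused pages are returned via kmem_free(). The user-space sketch below illustrates that arithmetic only; it is not part of the diff, and PAGE_SIZE_SKETCH, round_page_sketch() and the sample numbers are hypothetical stand-ins for vm_map_round_page() and the kernel types.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE_SKETCH 4096UL

/* Round up to the next page boundary, as vm_map_round_page() does. */
static size_t
round_page_sketch(size_t size)
{
	return (size + PAGE_SIZE_SKETCH - 1) & ~(PAGE_SIZE_SKETCH - 1);
}

int
main(void)
{
	size_t size = 4 * PAGE_SIZE_SKETCH;	/* bytes originally allocated (page aligned) */
	unsigned int used = 300;		/* entries actually filled in */
	size_t obj_size = 32;			/* stand-in for sizeof(vm_info_object_t) */

	size_t size_used = used * obj_size;			/* exact bytes handed to the copy */
	size_t vmsize_used = round_page_sketch(size_used);	/* pages those bytes occupy */

	printf("copy out %zu bytes\n", size_used);
	if (vmsize_used != size)
		printf("free %zu unused bytes starting at offset %zu\n",
		       size - vmsize_used, vmsize_used);
	return 0;
}

Keeping both values means the copy describes only valid entries while the free still operates on page-aligned addresses and sizes, which is what kmem_free() expects.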