/*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#define __DEBUG_ONLY
#endif /* !MACH_VM_DEBUG */
-#if VM32_SUPPORT
+#ifdef VM32_SUPPORT
#include <mach/vm32_map_server.h>
#include <mach/vm_map.h>
return KERN_FAILURE;
#else
vm_map_copy_t copy;
- vm_offset_t addr; /* memory for OOL data */
+ vm_offset_t addr = 0; /* memory for OOL data */
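	/* Zero-initialized: even with VM_FLAGS_ANYWHERE the allocator reads
	 * the incoming value of addr as a placement hint, so an uninitialized
	 * stack value should not be passed in. */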
vm_size_t size; /* size of the memory */
unsigned int room; /* room for this many objects */
unsigned int used; /* actually this many objects */
vio->vio_internal =
cobject->internal;
vio->vio_temporary =
- cobject->temporary;
+ FALSE;
vio->vio_alive =
cobject->alive;
vio->vio_purgable =
size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
VM_MAP_PAGE_MASK(ipc_kernel_map));
- kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
+ kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
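	/* vm_allocate_kernel() takes the VM tag (here VM_KERN_MEMORY_IPC) as an
	 * explicit argument instead of folding it into the flags with VM_MAKE_TAG(). */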
if (kr != KERN_SUCCESS)
return KERN_RESOURCE_SHORTAGE;
- kr = vm_map_wire(
+ kr = vm_map_wire_kernel(
ipc_kernel_map,
vm_map_trunc_page(addr,
VM_MAP_PAGE_MASK(ipc_kernel_map)),
vm_map_round_page(addr + size,
VM_MAP_PAGE_MASK(ipc_kernel_map)),
VM_PROT_READ|VM_PROT_WRITE,
+ VM_KERN_MEMORY_IPC,
FALSE);
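	/* vm_map_wire_kernel() likewise accepts the VM tag as its own parameter,
	 * inserted ahead of the trailing user_wire boolean. */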
assert(kr == KERN_SUCCESS);
}
if (size != 0)
kmem_free(ipc_kernel_map, addr, size);
} else {
- vm_size_t size_used =
- vm_map_round_page(used * sizeof(vm_info_object_t),
+ vm_size_t size_used = (used * sizeof(vm_info_object_t));
+ vm_size_t vmsize_used = vm_map_round_page(size_used,
VM_MAP_PAGE_MASK(ipc_kernel_map));
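	/* size_used is the exact byte count handed to vm_map_copyin() below;
	 * vmsize_used is that count rounded up to a page boundary, used when
	 * trimming the unused tail of the allocation. */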
			kr = vm_map_unwire(
				ipc_kernel_map,
				vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
				vm_map_round_page(addr + size_used, VM_MAP_PAGE_MASK(ipc_kernel_map)),
				FALSE);
			kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				(vm_map_size_t)size_used, TRUE, &copy);
			assert(kr == KERN_SUCCESS);
- if (size != size_used)
+ if (size != vmsize_used)
kmem_free(ipc_kernel_map,
- addr + size_used, size - size_used);
+ addr + vmsize_used, size - vmsize_used);
}
*regionp = region;
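/* The same vm_allocate_kernel()/vm_map_wire_kernel() conversion and the
 * size_used/vmsize_used split are applied to each of the remaining routines
 * below, which follow the same allocate-fill-trim pattern. */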
return KERN_FAILURE;
#else
vm_map_copy_t copy;
- vm_offset_t addr; /* memory for OOL data */
+ vm_offset_t addr = 0; /* memory for OOL data */
vm_size_t size; /* size of the memory */
unsigned int room; /* room for this many objects */
unsigned int used; /* actually this many objects */
vio->vio_internal =
cobject->internal;
vio->vio_temporary =
- cobject->temporary;
+ FALSE;
vio->vio_alive =
cobject->alive;
vio->vio_purgable =
size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
VM_MAP_PAGE_MASK(ipc_kernel_map));
- kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
+ kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
if (kr != KERN_SUCCESS)
return KERN_RESOURCE_SHORTAGE;
- kr = vm_map_wire(
+ kr = vm_map_wire_kernel(
ipc_kernel_map,
vm_map_trunc_page(addr,
VM_MAP_PAGE_MASK(ipc_kernel_map)),
vm_map_round_page(addr + size,
VM_MAP_PAGE_MASK(ipc_kernel_map)),
VM_PROT_READ|VM_PROT_WRITE,
+ VM_KERN_MEMORY_IPC,
FALSE);
assert(kr == KERN_SUCCESS);
}
if (size != 0)
kmem_free(ipc_kernel_map, addr, size);
} else {
- vm_size_t size_used =
- vm_map_round_page(used * sizeof(vm_info_object_t),
+ vm_size_t size_used = (used * sizeof(vm_info_object_t));
+ vm_size_t vmsize_used = vm_map_round_page(size_used,
VM_MAP_PAGE_MASK(ipc_kernel_map));
			kr = vm_map_unwire(
				ipc_kernel_map,
				vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
				vm_map_round_page(addr + size_used, VM_MAP_PAGE_MASK(ipc_kernel_map)),
				FALSE);
			kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				(vm_map_size_t)size_used, TRUE, &copy);
			assert(kr == KERN_SUCCESS);
- if (size != size_used)
+ if (size != vmsize_used)
kmem_free(ipc_kernel_map,
- addr + size_used, size - size_used);
+ addr + vmsize_used, size - vmsize_used);
}
*regionp = region;
vm_size_t size, size_used;
unsigned int actual, space;
page_address_array_t list;
- vm_offset_t addr;
+ vm_offset_t addr = 0;
if (map == VM_MAP_NULL)
return (KERN_INVALID_ARGUMENT);
VM_MAP_PAGE_MASK(ipc_kernel_map));
for (;;) {
- (void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
+ (void) vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
		(void) vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);
(void) kmem_free(ipc_kernel_map, addr, size);
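		/* Presumably the buffer was too small for the resident-page list:
		 * free it and go around the loop again with a larger size. */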
}
else {
+ vm_size_t vmsize_used;
*pages_count = actual;
- size_used = vm_map_round_page(actual * sizeof(vm_offset_t),
- VM_MAP_PAGE_MASK(ipc_kernel_map));
- (void) vm_map_wire(
+ size_used = (actual * sizeof(vm_offset_t));
+ vmsize_used = vm_map_round_page(size_used,
+ VM_MAP_PAGE_MASK(ipc_kernel_map));
+ (void) vm_map_wire_kernel(
ipc_kernel_map,
vm_map_trunc_page(addr,
VM_MAP_PAGE_MASK(ipc_kernel_map)),
vm_map_round_page(addr + size,
VM_MAP_PAGE_MASK(ipc_kernel_map)),
VM_PROT_READ|VM_PROT_WRITE,
+ VM_KERN_MEMORY_IPC,
FALSE);
(void) vm_map_copyin(ipc_kernel_map,
(vm_map_address_t)addr,
(vm_map_size_t)size_used,
TRUE,
(vm_map_copy_t *)pages);
- if (size_used != size) {
+ if (vmsize_used != size) {
(void) kmem_free(ipc_kernel_map,
- addr + size_used,
- size - size_used);
+ addr + vmsize_used,
+ size - vmsize_used);
}
}
#if !MACH_VM_DEBUG
return KERN_FAILURE;
#else
- vm_offset_t addr;
+ vm_offset_t addr = 0;
vm_size_t size = 0;
hash_info_bucket_t *info;
unsigned int potential, actual;
size = vm_map_round_page(actual * sizeof *info,
VM_MAP_PAGE_MASK(ipc_kernel_map));
- kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
+ kr = vm_allocate_kernel(ipc_kernel_map, &addr, size,
+ VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
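	/* Replaces kmem_alloc_pageable(); vm_allocate_kernel() with
	 * VM_FLAGS_ANYWHERE also yields pageable zero-filled kernel memory,
	 * while routing the request through the tagged entry point. */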
if (kr != KERN_SUCCESS)
return KERN_RESOURCE_SHORTAGE;
*countp = 0;
} else {
vm_map_copy_t copy;
- vm_size_t used;
+ vm_size_t used, vmused;
- used = vm_map_round_page(actual * sizeof *info,
- VM_MAP_PAGE_MASK(ipc_kernel_map));
+ used = (actual * sizeof(*info));
+ vmused = vm_map_round_page(used, VM_MAP_PAGE_MASK(ipc_kernel_map));
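	/* Same exact/rounded split as above: vm_map_copyin() gets the exact
	 * 'used' byte count, while the page-rounded 'vmused' bounds the free
	 * of the unused tail. */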
- if (used != size)
- kmem_free(ipc_kernel_map, addr + used, size - used);
+ if (vmused != size)
+ kmem_free(ipc_kernel_map, addr + vmused, size - vmused);
	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		(vm_map_size_t)used, TRUE, &copy);