/*
 * Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
- register vm_object_t object,
- register vm_object_offset_t offset,
- register vm_object_size_t size);
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size);
kern_return_t
kmem_alloc_contig(
kern_return_t
kernel_memory_allocate(
- register vm_map_t map,
- register vm_offset_t *addrp,
- register vm_size_t size,
- register vm_offset_t mask,
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_offset_t mask,
int flags,
vm_tag_t tag)
{
}
vm_page_more_fictitious();
}
- mem->pageq.next = (queue_entry_t)guard_page_list;
+ mem->snext = guard_page_list;
guard_page_list = mem;
}
}
VM_PAGE_WAIT();
}
- mem->pageq.next = (queue_entry_t)wired_page_list;
+ mem->snext = wired_page_list;
wired_page_list = mem;
}
}
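/*
 * At this point all of the guard and wired pages needed for the
 * allocation have been grabbed and chained through the page's snext
 * link onto local singly-linked lists, so nothing below has to wait
 * on the free list.
 */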
object = vm_object_allocate(map_size);
}
+ if (flags & KMA_ATOMIC)
+ vm_alloc_flags |= VM_FLAGS_ATOMIC_ENTRY;
+
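/*
 * KMA_ATOMIC is passed through to vm_map_find_space() as
 * VM_FLAGS_ATOMIC_ENTRY, which appears to mark the new map entry so
 * that it is not later coalesced with adjacent entries.
 */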
kr = vm_map_find_space(map, &map_addr,
fill_size, map_mask,
vm_alloc_flags, &entry);
panic("kernel_memory_allocate: guard_page_list == NULL");
mem = guard_page_list;
- guard_page_list = (vm_page_t)mem->pageq.next;
- mem->pageq.next = NULL;
+ guard_page_list = mem->snext;
+ mem->snext = NULL;
vm_page_insert(mem, object, offset + pg_offset);
panic("kernel_memory_allocate: wired_page_list == NULL");
mem = wired_page_list;
- wired_page_list = (vm_page_t)mem->pageq.next;
- mem->pageq.next = NULL;
+ wired_page_list = mem->snext;
+ mem->snext = NULL;
+
+ assert(mem->wire_count == 0);
+ assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+
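/*
 * Wire the page by hand: it came straight off the free list and is not
 * on any paging queue, so it is simply marked wired and its wire_count
 * bumped before being inserted into the object.
 */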
+ mem->vm_page_q_state = VM_PAGE_IS_WIRED;
mem->wire_count++;
+ if (__improbable(mem->wire_count == 0)) {
+ panic("kernel_memory_allocate(%p): wire_count overflow",
+ mem);
+ }
vm_page_insert_wired(mem, object, offset + pg_offset, tag);
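/*
 * KMA_NOENCRYPT callers get pages that are zeroed and flagged via
 * pmap_set_noencrypt(), presumably so they are left unencrypted when a
 * hibernation image is written.
 */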
if (flags & KMA_NOENCRYPT) {
bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);
- pmap_set_noencrypt(mem->phys_page);
+ pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
}
}
}
panic("kernel_memory_allocate: guard_page_list == NULL");
mem = guard_page_list;
- guard_page_list = (vm_page_t)mem->pageq.next;
- mem->pageq.next = NULL;
+ guard_page_list = mem->snext;
+ mem->snext = NULL;
vm_page_insert(mem, object, offset + pg_offset);
VM_PAGE_WAIT();
}
- mem->pageq.next = (queue_entry_t) page_list;
+ mem->snext = page_list;
page_list = mem;
pg_offset -= PAGE_SIZE_64;
kr = pmap_enter_options(kernel_pmap,
- addr + pg_offset, mem->phys_page,
+ addr + pg_offset, VM_PAGE_GET_PHYS_PAGE(mem),
VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
PMAP_OPTIONS_INTERNAL, NULL);
assert(kr == KERN_SUCCESS);
pg_offset += PAGE_SIZE_64) {
mem = page_list;
- page_list = (vm_page_t) mem->pageq.next;
- mem->pageq.next = NULL;
+ page_list = mem->snext;
+ mem->snext = NULL;
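/*
 * Compressor-backed pages are inserted into the object and tagged
 * VM_PAGE_USED_BY_COMPRESSOR; they are owned by the compressor and
 * never appear on any paging queue.
 */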
vm_page_insert(mem, object, offset + pg_offset);
assert(mem->busy);
mem->busy = FALSE;
mem->pmapped = TRUE;
mem->wpmapped = TRUE;
- mem->compressor = TRUE;
+ mem->vm_page_q_state = VM_PAGE_USED_BY_COMPRESSOR;
}
vm_object_unlock(object);
}
VM_PAGE_WAIT();
}
- mem->pageq.next = (queue_entry_t) page_list;
+ mem->snext = page_list;
page_list = mem;
}
if (flags & KMA_KOBJECT) {
panic("kernel_memory_populate: page_list == NULL");
mem = page_list;
- page_list = (vm_page_t) mem->pageq.next;
- mem->pageq.next = NULL;
+ page_list = mem->snext;
+ mem->snext = NULL;
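/*
 * Wire the page by hand, mirroring kernel_memory_allocate() above.
 */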
+ assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ mem->vm_page_q_state = VM_PAGE_IS_WIRED;
mem->wire_count++;
+ if (__improbable(mem->wire_count == 0)) {
+ panic("kernel_memory_populate(%p): wire_count overflow",
+ mem);
+ }
vm_page_insert_wired(mem, object, offset + pg_offset, tag);
}
if (flags & KMA_NOENCRYPT) {
bzero(CAST_DOWN(void *, (addr + pg_offset)), PAGE_SIZE);
- pmap_set_noencrypt(mem->phys_page);
+ pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
}
}
vm_page_lock_queues();
mem = vm_page_lookup(object, offset + pg_offset);
assert(mem);
-
- pmap_disconnect(mem->phys_page);
+
+ if (mem->vm_page_q_state != VM_PAGE_USED_BY_COMPRESSOR)
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
mem->busy = TRUE;
vm_page_remove(mem, TRUE);
assert(mem->busy);
- assert(mem->pageq.next == NULL &&
- mem->pageq.prev == NULL);
- mem->pageq.next = (queue_entry_t)local_freeq;
+ assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
+ assert((mem->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
+ (mem->vm_page_q_state == VM_PAGE_NOT_ON_Q));
+
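/*
 * Chain the page onto a local free list (again through snext) so the
 * whole batch can be handed back to the free list once the object
 * lock has been dropped.
 */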
+ mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
+ mem->snext = local_freeq;
local_freeq = mem;
}
vm_object_unlock(object);
return (kmem_alloc(map, addrp, size, vm_tag_bt()));
}
+
kern_return_t
kmem_alloc(
vm_map_t map,
vm_offset_t *addrp,
vm_size_t size,
- vm_tag_t tag)
+ vm_tag_t tag)
+{
+ return kmem_alloc_flags(map, addrp, size, tag, 0);
+}
+
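/*
 * kmem_alloc_flags() behaves like kmem_alloc() but lets the caller pass
 * KMA_* flags straight through to kernel_memory_allocate(); kmem_alloc()
 * itself is now just the flags == 0 case.
 */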
+kern_return_t
+kmem_alloc_flags(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_tag_t tag,
+ int flags)
{
- kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, 0, tag);
+ kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, flags, tag);
TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp);
return kr;
}
kern_return_t
kmem_alloc_pages(
- register vm_object_t object,
- register vm_object_offset_t offset,
- register vm_object_size_t size)
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size)
{
vm_object_size_t alloc_size;
alloc_size = vm_object_round_page(size);
vm_object_lock(object);
while (alloc_size) {
- register vm_page_t mem;
+ vm_page_t mem;
/*
vm_offset_t addr,
vm_offset_t *up_addr)
{
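/*
 * Addresses covered by VM_KERNEL_IS_SLID() are mapped back to their
 * unslid form by subtracting vm_kernel_slide.
 */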
- if (VM_KERNEL_IS_SLID(addr) || VM_KERNEL_IS_KEXT(addr) ||
- VM_KERNEL_IS_PRELINKTEXT(addr) || VM_KERNEL_IS_PRELINKINFO(addr) ||
- VM_KERNEL_IS_KEXT_LINKEDIT(addr)) {
+ if (VM_KERNEL_IS_SLID(addr)) {
*up_addr = addr - vm_kernel_slide;
return;
}