#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
-#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
/*
* Forward declarations for internal functions.
*/
extern kern_return_t kmem_alloc_pages(
- register vm_object_t object,
- register vm_object_offset_t offset,
- register vm_object_size_t size);
-
-extern void kmem_remap_pages(
- register vm_object_t object,
- register vm_object_offset_t offset,
- register vm_offset_t start,
- register vm_offset_t end,
- vm_prot_t protection);
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size);
kern_return_t
kmem_alloc_contig(
vm_offset_t mask,
ppnum_t max_pnum,
ppnum_t pnum_mask,
- int flags)
+ int flags,
+ vm_tag_t tag)
{
vm_object_t object;
vm_object_offset_t offset;
if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT)))
return KERN_INVALID_ARGUMENT;
- map_size = vm_map_round_page(size);
+ map_size = vm_map_round_page(size,
+ VM_MAP_PAGE_MASK(map));
map_mask = (vm_map_offset_t)mask;
/* Check for zero allocation size (either directly or via overflow) */
return kr;
}
- entry->object.vm_object = object;
- entry->offset = offset = (object == kernel_object) ?
- map_addr : 0;
+ if (object == kernel_object) {
+ offset = map_addr;
+ } else {
+ offset = 0;
+ }
+ VME_OBJECT_SET(entry, object);
+ VME_OFFSET_SET(entry, offset);
+ VME_ALIAS_SET(entry, tag);
/* Take an extra object ref in case the map entry gets deleted */
vm_object_reference(object);
kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum, pnum_mask, FALSE, flags);
if (kr != KERN_SUCCESS) {
- vm_map_remove(map, vm_map_trunc_page(map_addr),
- vm_map_round_page(map_addr + map_size), 0);
+ vm_map_remove(map,
+ vm_map_trunc_page(map_addr,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(map_addr + map_size,
+ VM_MAP_PAGE_MASK(map)),
+ 0);
vm_object_deallocate(object);
*addrp = 0;
return kr;
}
vm_object_unlock(object);
- if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
- vm_map_round_page(map_addr + map_size), VM_PROT_DEFAULT, FALSE))
- != KERN_SUCCESS) {
+ kr = vm_map_wire(map,
+ vm_map_trunc_page(map_addr,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(map_addr + map_size,
+ VM_MAP_PAGE_MASK(map)),
+ VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(tag),
+ FALSE);
+
+ if (kr != KERN_SUCCESS) {
if (object == kernel_object) {
vm_object_lock(object);
vm_object_page_remove(object, offset, offset + map_size);
vm_object_unlock(object);
}
- vm_map_remove(map, vm_map_trunc_page(map_addr),
- vm_map_round_page(map_addr + map_size), 0);
+ vm_map_remove(map,
+ vm_map_trunc_page(map_addr,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(map_addr + map_size,
+ VM_MAP_PAGE_MASK(map)),
+ 0);
vm_object_deallocate(object);
return kr;
}
kern_return_t
kernel_memory_allocate(
- register vm_map_t map,
- register vm_offset_t *addrp,
- register vm_size_t size,
- register vm_offset_t mask,
- int flags)
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_offset_t mask,
+ int flags,
+ vm_tag_t tag)
{
vm_object_t object;
vm_object_offset_t offset;
vm_object_offset_t pg_offset;
- vm_map_entry_t entry;
+ vm_map_entry_t entry = NULL;
vm_map_offset_t map_addr, fill_start;
vm_map_offset_t map_mask;
vm_map_size_t map_size, fill_size;
- kern_return_t kr;
+ kern_return_t kr, pe_result;
vm_page_t mem;
vm_page_t guard_page_list = NULL;
vm_page_t wired_page_list = NULL;
panic("kernel_memory_allocate: VM is not ready");
}
- map_size = vm_map_round_page(size);
+ map_size = vm_map_round_page(size,
+ VM_MAP_PAGE_MASK(map));
map_mask = (vm_map_offset_t) mask;
- vm_alloc_flags = 0;
+
+ vm_alloc_flags = VM_MAKE_TAG(tag);
/* Check for zero allocation size (either directly or via overflow) */
if (map_size == 0) {
* limit the size of a single extent of wired memory
* to try and limit the damage to the system if
* too many pages get wired down
+ * limit raised to 2GB with 128GB max physical limit,
+ * but scaled by installed memory above this
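+ * (i.e. the cap is MAX(2GB, sane_size / 64): it stays at 2GB up to
+ * 128GB of installed memory and reaches 4GB on a 256GB machine)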
*/
- if (map_size > (1 << 30)) {
+ if ( !(flags & KMA_VAONLY) && map_size > MAX(1ULL<<31, sane_size/64)) {
return KERN_RESOURCE_SHORTAGE;
}
}
vm_page_more_fictitious();
}
- mem->pageq.next = (queue_entry_t)guard_page_list;
+ mem->snext = guard_page_list;
guard_page_list = mem;
}
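+ /*
+  * KMA_VAONLY: only the virtual range is set up here; physical
+  * pages can be supplied later (e.g. via kernel_memory_populate()).
+  */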
+ if (! (flags & KMA_VAONLY)) {
for (i = 0; i < wired_page_count; i++) {
uint64_t unavailable;
}
VM_PAGE_WAIT();
}
- mem->pageq.next = (queue_entry_t)wired_page_list;
+ mem->snext = wired_page_list;
wired_page_list = mem;
}
+ }
/*
* Allocate a new object (if necessary). We must do this before
if ((flags & KMA_KOBJECT) != 0) {
object = kernel_object;
vm_object_reference(object);
+ } else if ((flags & KMA_COMPRESSOR) != 0) {
+ object = compressor_object;
+ vm_object_reference(object);
} else {
object = vm_object_allocate(map_size);
}
+ if (flags & KMA_ATOMIC)
+ vm_alloc_flags |= VM_FLAGS_ATOMIC_ENTRY;
+
kr = vm_map_find_space(map, &map_addr,
fill_size, map_mask,
vm_alloc_flags, &entry);
goto out;
}
- entry->object.vm_object = object;
- entry->offset = offset = (object == kernel_object) ?
- map_addr : 0;
-
- entry->wired_count++;
+ if (object == kernel_object || object == compressor_object) {
+ offset = map_addr;
+ } else {
+ offset = 0;
+ }
+ VME_OBJECT_SET(entry, object);
+ VME_OFFSET_SET(entry, offset);
+
+ if (object != compressor_object)
+ entry->wired_count++;
if (flags & KMA_PERMANENT)
entry->permanent = TRUE;
- if (object != kernel_object)
+ if (object != kernel_object && object != compressor_object)
vm_object_reference(object);
vm_object_lock(object);
panic("kernel_memory_allocate: guard_page_list == NULL");
mem = guard_page_list;
- guard_page_list = (vm_page_t)mem->pageq.next;
- mem->pageq.next = NULL;
+ guard_page_list = mem->snext;
+ mem->snext = NULL;
vm_page_insert(mem, object, offset + pg_offset);
kma_prot = VM_PROT_READ | VM_PROT_WRITE;
+ if (flags & KMA_VAONLY) {
+ pg_offset = fill_start + fill_size;
+ } else {
for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) {
if (wired_page_list == NULL)
panic("kernel_memory_allocate: wired_page_list == NULL");
mem = wired_page_list;
- wired_page_list = (vm_page_t)mem->pageq.next;
- mem->pageq.next = NULL;
+ wired_page_list = mem->snext;
+ mem->snext = NULL;
+
+ assert(mem->wire_count == 0);
+ assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+
+ mem->vm_page_q_state = VM_PAGE_IS_WIRED;
mem->wire_count++;
+ if (__improbable(mem->wire_count == 0)) {
+ panic("kernel_memory_allocate(%p): wire_count overflow",
+ mem);
+ }
- vm_page_insert(mem, object, offset + pg_offset);
+ vm_page_insert_wired(mem, object, offset + pg_offset, tag);
mem->busy = FALSE;
mem->pmapped = TRUE;
mem->wpmapped = TRUE;
- PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem,
- kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE);
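+ /*
+  * Try a non-blocking pmap enter first so the object stays locked;
+  * if the pmap layer needs to allocate (KERN_RESOURCE_SHORTAGE),
+  * drop the object lock and retry with the blocking PMAP_ENTER.
+  */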
+ PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, mem,
+ kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+ PMAP_OPTIONS_NOWAIT, pe_result);
+
+ if (pe_result == KERN_RESOURCE_SHORTAGE) {
+ vm_object_unlock(object);
+ PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem,
+ kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE);
+
+ vm_object_lock(object);
+ }
if (flags & KMA_NOENCRYPT) {
bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);
- pmap_set_noencrypt(mem->phys_page);
+ pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
}
}
+ }
if ((fill_start + fill_size) < map_size) {
if (guard_page_list == NULL)
panic("kernel_memory_allocate: guard_page_list == NULL");
mem = guard_page_list;
- guard_page_list = (vm_page_t)mem->pageq.next;
- mem->pageq.next = NULL;
+ guard_page_list = mem->snext;
+ mem->snext = NULL;
vm_page_insert(mem, object, offset + pg_offset);
if (guard_page_list || wired_page_list)
panic("kernel_memory_allocate: non empty list\n");
+ if (! (flags & KMA_VAONLY)) {
vm_page_lockspin_queues();
vm_page_wire_count += wired_page_count;
vm_page_unlock_queues();
+ }
vm_object_unlock(object);
/*
* now that the pages are wired, we no longer have to fear coalescing
*/
- if (object == kernel_object)
+ if (object == kernel_object || object == compressor_object)
vm_map_simplify(map, map_addr);
else
vm_object_deallocate(object);
return kr;
}
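+
+/*
+ * kernel_memory_populate:
+ *
+ * Back an existing kernel virtual range with physical pages.  The pages
+ * are either wired into the kernel_object (KMA_KOBJECT) or handed to the
+ * compressor_object (KMA_COMPRESSOR); exactly one of the two flags must
+ * be set.  Compressor pages are not counted as wired.  Typically used to
+ * fill in a range that was reserved earlier with KMA_VAONLY.
+ */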
+kern_return_t
+kernel_memory_populate(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size,
+ int flags,
+ vm_tag_t tag)
+{
+ vm_object_t object;
+ vm_object_offset_t offset, pg_offset;
+ kern_return_t kr, pe_result;
+ vm_page_t mem;
+ vm_page_t page_list = NULL;
+ int page_count = 0;
+ int i;
+
+ page_count = (int) (size / PAGE_SIZE_64);
+
+ assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
+
+ if (flags & KMA_COMPRESSOR) {
+
+ pg_offset = page_count * PAGE_SIZE_64;
+
+ do {
+ for (;;) {
+ mem = vm_page_grab();
+
+ if (mem != VM_PAGE_NULL)
+ break;
+
+ VM_PAGE_WAIT();
+ }
+ mem->snext = page_list;
+ page_list = mem;
+
+ pg_offset -= PAGE_SIZE_64;
+
+ kr = pmap_enter_options(kernel_pmap,
+ addr + pg_offset, VM_PAGE_GET_PHYS_PAGE(mem),
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
+ PMAP_OPTIONS_INTERNAL, NULL);
+ assert(kr == KERN_SUCCESS);
+
+ } while (pg_offset);
+
+ offset = addr;
+ object = compressor_object;
+
+ vm_object_lock(object);
+
+ for (pg_offset = 0;
+ pg_offset < size;
+ pg_offset += PAGE_SIZE_64) {
+
+ mem = page_list;
+ page_list = mem->snext;
+ mem->snext = NULL;
+
+ vm_page_insert(mem, object, offset + pg_offset);
+ assert(mem->busy);
+
+ mem->busy = FALSE;
+ mem->pmapped = TRUE;
+ mem->wpmapped = TRUE;
+ mem->vm_page_q_state = VM_PAGE_USED_BY_COMPRESSOR;
+ }
+ vm_object_unlock(object);
+
+ return KERN_SUCCESS;
+ }
+
+ for (i = 0; i < page_count; i++) {
+ for (;;) {
+ if (flags & KMA_LOMEM)
+ mem = vm_page_grablo();
+ else
+ mem = vm_page_grab();
+
+ if (mem != VM_PAGE_NULL)
+ break;
+
+ if (flags & KMA_NOPAGEWAIT) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ if ((flags & KMA_LOMEM) &&
+ (vm_lopage_needed == TRUE)) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ VM_PAGE_WAIT();
+ }
+ mem->snext = page_list;
+ page_list = mem;
+ }
+ if (flags & KMA_KOBJECT) {
+ offset = addr;
+ object = kernel_object;
+
+ vm_object_lock(object);
+ } else {
+ /*
+ * If it's not the kernel object, we need to:
+ * lock map;
+ * lookup entry;
+ * lock object;
+ * take reference on object;
+ * unlock map;
+ */
+ panic("kernel_memory_populate(%p,0x%llx,0x%llx,0x%x): "
+ "!KMA_KOBJECT",
+ map, (uint64_t) addr, (uint64_t) size, flags);
+ }
+
+ for (pg_offset = 0;
+ pg_offset < size;
+ pg_offset += PAGE_SIZE_64) {
+
+ if (page_list == NULL)
+ panic("kernel_memory_populate: page_list == NULL");
+
+ mem = page_list;
+ page_list = mem->snext;
+ mem->snext = NULL;
+
+ assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ mem->vm_page_q_state = VM_PAGE_IS_WIRED;
+ mem->wire_count++;
+ if (__improbable(mem->wire_count == 0)) {
+ panic("kernel_memory_populate(%p): wire_count overflow",
+ mem);
+ }
+
+ vm_page_insert_wired(mem, object, offset + pg_offset, tag);
+
+ mem->busy = FALSE;
+ mem->pmapped = TRUE;
+ mem->wpmapped = TRUE;
+
+ PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem,
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
+ ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+ PMAP_OPTIONS_NOWAIT, pe_result);
+
+ if (pe_result == KERN_RESOURCE_SHORTAGE) {
+
+ vm_object_unlock(object);
+
+ PMAP_ENTER(kernel_pmap, addr + pg_offset, mem,
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
+ ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE);
+
+ vm_object_lock(object);
+ }
+ if (flags & KMA_NOENCRYPT) {
+ bzero(CAST_DOWN(void *, (addr + pg_offset)), PAGE_SIZE);
+ pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
+ }
+ }
+ vm_page_lock_queues();
+ vm_page_wire_count += page_count;
+ vm_page_unlock_queues();
+
+ vm_object_unlock(object);
+
+ return KERN_SUCCESS;
+
+out:
+ if (page_list)
+ vm_page_free_list(page_list, FALSE);
+
+ return kr;
+}
+
+
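+/*
+ * kernel_memory_depopulate:
+ *
+ * Reverse of kernel_memory_populate(): strip the physical pages backing
+ * a kernel virtual range (kernel_object or compressor_object only) and
+ * free them.  The virtual range itself stays reserved.
+ */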
+void
+kernel_memory_depopulate(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size,
+ int flags)
+{
+ vm_object_t object;
+ vm_object_offset_t offset, pg_offset;
+ vm_page_t mem;
+ vm_page_t local_freeq = NULL;
+
+ assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
+
+ if (flags & KMA_COMPRESSOR) {
+ offset = addr;
+ object = compressor_object;
+
+ vm_object_lock(object);
+ } else if (flags & KMA_KOBJECT) {
+ offset = addr;
+ object = kernel_object;
+
+ vm_object_lock(object);
+ } else {
+ offset = 0;
+ object = NULL;
+ /*
+ * If it's not the kernel object, we need to:
+ * lock map;
+ * lookup entry;
+ * lock object;
+ * unlock map;
+ */
+ panic("kernel_memory_depopulate(%p,0x%llx,0x%llx,0x%x): "
+ "!KMA_KOBJECT",
+ map, (uint64_t) addr, (uint64_t) size, flags);
+ }
+ pmap_protect(kernel_map->pmap, offset, offset + size, VM_PROT_NONE);
+
+ for (pg_offset = 0;
+ pg_offset < size;
+ pg_offset += PAGE_SIZE_64) {
+
+ mem = vm_page_lookup(object, offset + pg_offset);
+
+ assert(mem);
+
+ if (mem->vm_page_q_state != VM_PAGE_USED_BY_COMPRESSOR)
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
+
+ mem->busy = TRUE;
+
+ assert(mem->tabled);
+ vm_page_remove(mem, TRUE);
+ assert(mem->busy);
+
+ assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
+ assert((mem->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
+ (mem->vm_page_q_state == VM_PAGE_NOT_ON_Q));
+
+ mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
+ mem->snext = local_freeq;
+ local_freeq = mem;
+ }
+ vm_object_unlock(object);
+
+ if (local_freeq)
+ vm_page_free_list(local_freeq, TRUE);
+}
+
/*
* kmem_alloc:
*
*/
kern_return_t
-kmem_alloc(
+kmem_alloc_external(
vm_map_t map,
vm_offset_t *addrp,
vm_size_t size)
{
- kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, 0);
+ return (kmem_alloc(map, addrp, size, vm_tag_bt()));
+}
+
+
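+/*
+ * The *_external variants take no tag and derive one from the caller's
+ * backtrace via vm_tag_bt(); in-kernel callers pass an explicit vm_tag_t
+ * so that wired memory can be attributed to the owning subsystem.
+ */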
+kern_return_t
+kmem_alloc(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_tag_t tag)
+{
+ return kmem_alloc_flags(map, addrp, size, tag, 0);
+}
+
+kern_return_t
+kmem_alloc_flags(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_tag_t tag,
+ int flags)
+{
+ kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, flags, tag);
TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp);
return kr;
}
vm_offset_t oldaddr,
vm_size_t oldsize,
vm_offset_t *newaddrp,
- vm_size_t newsize)
+ vm_size_t newsize,
+ vm_tag_t tag)
{
vm_object_t object;
vm_object_offset_t offset;
vm_page_t mem;
kern_return_t kr;
- oldmapmin = vm_map_trunc_page(oldaddr);
- oldmapmax = vm_map_round_page(oldaddr + oldsize);
+ oldmapmin = vm_map_trunc_page(oldaddr,
+ VM_MAP_PAGE_MASK(map));
+ oldmapmax = vm_map_round_page(oldaddr + oldsize,
+ VM_MAP_PAGE_MASK(map));
oldmapsize = oldmapmax - oldmapmin;
- newmapsize = vm_map_round_page(newsize);
+ newmapsize = vm_map_round_page(newsize,
+ VM_MAP_PAGE_MASK(map));
/*
if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
panic("kmem_realloc");
- object = oldentry->object.vm_object;
+ object = VME_OBJECT(oldentry);
/*
* Increase the size of the object and
vm_object_deallocate(object);
return kr;
}
- newentry->object.vm_object = object;
- newentry->offset = 0;
- assert (newentry->wired_count == 0);
+ VME_OBJECT_SET(newentry, object);
+ VME_OFFSET_SET(newentry, 0);
+ VME_ALIAS_SET(newentry, tag);
+ assert(newentry->wired_count == 0);
/* add an extra reference in case we have someone doing an */
vm_object_reference(object);
vm_map_unlock(map);
- kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize, VM_PROT_DEFAULT, FALSE);
+ kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize,
+ VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(tag), FALSE);
if (KERN_SUCCESS != kr) {
vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
vm_object_lock(object);
*/
kern_return_t
-kmem_alloc_kobject(
+kmem_alloc_kobject_external(
vm_map_t map,
vm_offset_t *addrp,
vm_size_t size)
{
- return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
+ return (kmem_alloc_kobject(map, addrp, size, vm_tag_bt()));
+}
+
+kern_return_t
+kmem_alloc_kobject(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_tag_t tag)
+{
+ return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT, tag);
}
/*
kmem_alloc_aligned(
vm_map_t map,
vm_offset_t *addrp,
- vm_size_t size)
+ vm_size_t size,
+ vm_tag_t tag)
{
if ((size & (size - 1)) != 0)
panic("kmem_alloc_aligned: size not aligned");
- return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
+ return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT, tag);
}
/*
*/
kern_return_t
-kmem_alloc_pageable(
+kmem_alloc_pageable_external(
vm_map_t map,
vm_offset_t *addrp,
vm_size_t size)
+{
+ return (kmem_alloc_pageable(map, addrp, size, vm_tag_bt()));
+}
+
+kern_return_t
+kmem_alloc_pageable(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_tag_t tag)
{
vm_map_offset_t map_addr;
vm_map_size_t map_size;
kern_return_t kr;
#ifndef normal
- map_addr = (vm_map_min(map)) + 0x1000;
+ map_addr = (vm_map_min(map)) + PAGE_SIZE;
#else
map_addr = vm_map_min(map);
#endif
- map_size = vm_map_round_page(size);
+ map_size = vm_map_round_page(size,
+ VM_MAP_PAGE_MASK(map));
kr = vm_map_enter(map, &map_addr, map_size,
- (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
+ (vm_map_offset_t) 0,
+ VM_FLAGS_ANYWHERE | VM_MAKE_TAG(tag),
VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
return;
}
- kr = vm_map_remove(map, vm_map_trunc_page(addr),
- vm_map_round_page(addr + size),
- VM_MAP_REMOVE_KUNWIRE);
+ kr = vm_map_remove(map,
+ vm_map_trunc_page(addr,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(addr + size,
+ VM_MAP_PAGE_MASK(map)),
+ VM_MAP_REMOVE_KUNWIRE);
if (kr != KERN_SUCCESS)
panic("kmem_free");
}
kern_return_t
kmem_alloc_pages(
- register vm_object_t object,
- register vm_object_offset_t offset,
- register vm_object_size_t size)
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size)
{
vm_object_size_t alloc_size;
alloc_size = vm_object_round_page(size);
vm_object_lock(object);
while (alloc_size) {
- register vm_page_t mem;
+ vm_page_t mem;
/*
return KERN_SUCCESS;
}
-/*
- * Remap wired pages in an object into a new region.
- * The object is assumed to be mapped into the kernel map or
- * a submap.
- */
-void
-kmem_remap_pages(
- register vm_object_t object,
- register vm_object_offset_t offset,
- register vm_offset_t start,
- register vm_offset_t end,
- vm_prot_t protection)
-{
-
- vm_map_offset_t map_start;
- vm_map_offset_t map_end;
-
- /*
- * Mark the pmap region as not pageable.
- */
- map_start = vm_map_trunc_page(start);
- map_end = vm_map_round_page(end);
-
- pmap_pageable(kernel_pmap, map_start, map_end, FALSE);
-
- while (map_start < map_end) {
- register vm_page_t mem;
-
- vm_object_lock(object);
-
- /*
- * Find a page
- */
- if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
- panic("kmem_remap_pages");
-
- /*
- * Wire it down (again)
- */
- vm_page_lockspin_queues();
- vm_page_wire(mem);
- vm_page_unlock_queues();
- vm_object_unlock(object);
-
- /*
- * ENCRYPTED SWAP:
- * The page is supposed to be wired now, so it
- * shouldn't be encrypted at this point. It can
- * safely be entered in the page table.
- */
- ASSERT_PAGE_DECRYPTED(mem);
-
- /*
- * Enter it in the kernel pmap. The page isn't busy,
- * but this shouldn't be a problem because it is wired.
- */
-
- mem->pmapped = TRUE;
- mem->wpmapped = TRUE;
-
- PMAP_ENTER(kernel_pmap, map_start, mem, protection, VM_PROT_NONE, 0, TRUE);
-
- map_start += PAGE_SIZE;
- offset += PAGE_SIZE;
- }
-}
-
/*
* kmem_suballoc:
*
vm_map_size_t map_size;
kern_return_t kr;
- map_size = vm_map_round_page(size);
+ map_size = vm_map_round_page(size,
+ VM_MAP_PAGE_MASK(parent));
/*
* Need reference on submap object because it is internal
*/
vm_object_reference(vm_submap_object);
- map_addr = (flags & VM_FLAGS_ANYWHERE) ?
- vm_map_min(parent) : vm_map_trunc_page(*addr);
+ map_addr = ((flags & VM_FLAGS_ANYWHERE)
+ ? vm_map_min(parent)
+ : vm_map_trunc_page(*addr,
+ VM_MAP_PAGE_MASK(parent)));
kr = vm_map_enter(parent, &map_addr, map_size,
(vm_map_offset_t) 0, flags,
map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
if (map == VM_MAP_NULL)
panic("kmem_suballoc: vm_map_create failed"); /* "can't happen" */
+ /* inherit the parent map's page size */
+ vm_map_set_page_shift(map, VM_MAP_PAGE_SHIFT(parent));
kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
if (kr != KERN_SUCCESS) {
vm_map_offset_t map_start;
vm_map_offset_t map_end;
- map_start = vm_map_trunc_page(start);
- map_end = vm_map_round_page(end);
+ map_start = vm_map_trunc_page(start,
+ VM_MAP_PAGE_MASK(kernel_map));
+ map_end = vm_map_round_page(end,
+ VM_MAP_PAGE_MASK(kernel_map));
kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_AND_KEXT_ADDRESS,
map_end, FALSE);
while(vm_map_lookup_entry(map, off, &entry)) {
local_len = len;
- if (entry->object.vm_object == VM_OBJECT_NULL) {
+ if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
vm_map_unlock(map);
return KERN_SUCCESS;
}
vm_map_t old_map;
old_map = map;
- vm_map_lock(entry->object.sub_map);
- map = entry->object.sub_map;
- off = entry->offset + (off - entry->vme_start);
+ vm_map_lock(VME_SUBMAP(entry));
+ map = VME_SUBMAP(entry);
+ off = VME_OFFSET(entry) + (off - entry->vme_start);
vm_map_unlock(old_map);
continue;
}
- obj = entry->object.vm_object;
- obj_off = (off - entry->vme_start) + entry->offset;
+ obj = VME_OBJECT(entry);
+ obj_off = (off - entry->vme_start) + VME_OFFSET(entry);
while(obj->shadow) {
obj_off += obj->vo_shadow_offset;
obj = obj->shadow;
vm_map_unlock(map);
return kr;
}
+
+/*
+ *
+ * The following two functions are to be used when exposing kernel
+ * addresses to userspace via any of the various debug or info
+ * facilities that exist. These are basically the same as VM_KERNEL_ADDRPERM()
+ * and VM_KERNEL_UNSLIDE_OR_PERM() except they use a different random seed and
+ * are exported to KEXTs.
+ *
+ * NOTE: USE THE MACRO VERSIONS OF THESE FUNCTIONS (in vm_param.h) FROM WITHIN THE KERNEL
+ */
+
+/*
+ * vm_kernel_addrperm_external:
+ *
+ * Used when exposing an address to userspace that is in the kernel's
+ * "heap". These addresses are not loaded from anywhere and are therefore
+ * not slid. We apply a permutation value to obscure the address.
+ */
+void
+vm_kernel_addrperm_external(
+ vm_offset_t addr,
+ vm_offset_t *perm_addr)
+{
+ if (addr == 0) {
+ *perm_addr = 0;
+ return;
+ }
+
+ *perm_addr = (addr + vm_kernel_addrperm_ext);
+ return;
+}
+
+/*
+ * vm_kernel_unslide_or_perm_external:
+ *
+ * Use this function when exposing an address to userspace that could come from
+ * either kernel text/data *or* the heap.
+ */
+void
+vm_kernel_unslide_or_perm_external(
+ vm_offset_t addr,
+ vm_offset_t *up_addr)
+{
+ if (VM_KERNEL_IS_SLID(addr)) {
+ *up_addr = addr - vm_kernel_slide;
+ return;
+ }
+
+ vm_kernel_addrperm_external(addr, up_addr);
+ return;
+}
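+
+/*
+ * Example (illustrative only; kobj_ptr and info_out are hypothetical):
+ * a debug/info path in a kext would obscure a kernel heap pointer before
+ * copying it out to userspace roughly like this:
+ *
+ *	vm_offset_t safe_addr;
+ *
+ *	vm_kernel_unslide_or_perm_external((vm_offset_t)kobj_ptr, &safe_addr);
+ *	info_out->object_addr = (uint64_t)safe_addr;
+ */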