if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT)))
return KERN_INVALID_ARGUMENT;
+
+ map_size = vm_map_round_page(size,
+ VM_MAP_PAGE_MASK(map));
+ map_mask = (vm_map_offset_t)mask;
- if (size == 0) {
+ /* Check for zero allocation size (either directly, or via overflow when rounding size up to a page boundary) */
+ if (map_size == 0) {
*addrp = 0;
return KERN_INVALID_ARGUMENT;
}
- map_size = vm_map_round_page(size);
- map_mask = (vm_map_offset_t)mask;
-
/*
* Allocate a new object (if necessary) and the reference we
* will be donating to the map entry. We must do this before
kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum, pnum_mask, FALSE, flags);
if (kr != KERN_SUCCESS) {
- vm_map_remove(map, vm_map_trunc_page(map_addr),
- vm_map_round_page(map_addr + map_size), 0);
+ vm_map_remove(map,
+ vm_map_trunc_page(map_addr,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(map_addr + map_size,
+ VM_MAP_PAGE_MASK(map)),
+ 0);
vm_object_deallocate(object);
*addrp = 0;
return kr;
}
vm_object_unlock(object);
- if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
- vm_map_round_page(map_addr + map_size), VM_PROT_DEFAULT, FALSE))
- != KERN_SUCCESS) {
+ kr = vm_map_wire(map,
+ vm_map_trunc_page(map_addr,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(map_addr + map_size,
+ VM_MAP_PAGE_MASK(map)),
+ VM_PROT_DEFAULT,
+ FALSE);
+ if (kr != KERN_SUCCESS) {
if (object == kernel_object) {
vm_object_lock(object);
vm_object_page_remove(object, offset, offset + map_size);
vm_object_unlock(object);
}
- vm_map_remove(map, vm_map_trunc_page(map_addr),
- vm_map_round_page(map_addr + map_size), 0);
+ vm_map_remove(map,
+ vm_map_trunc_page(map_addr,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(map_addr + map_size,
+ VM_MAP_PAGE_MASK(map)),
+ 0);
vm_object_deallocate(object);
return kr;
}
vm_object_t object;
vm_object_offset_t offset;
vm_object_offset_t pg_offset;
- vm_map_entry_t entry;
+ vm_map_entry_t entry = NULL;
vm_map_offset_t map_addr, fill_start;
vm_map_offset_t map_mask;
vm_map_size_t map_size, fill_size;
- kern_return_t kr;
+ kern_return_t kr, pe_result;
vm_page_t mem;
vm_page_t guard_page_list = NULL;
vm_page_t wired_page_list = NULL;
int wired_page_count = 0;
int i;
int vm_alloc_flags;
+ vm_prot_t kma_prot;
if (! vm_kernel_ready) {
panic("kernel_memory_allocate: VM is not ready");
}
- if (size == 0) {
- *addrp = 0;
- return KERN_INVALID_ARGUMENT;
- }
- if (flags & KMA_LOMEM) {
- if ( !(flags & KMA_NOPAGEWAIT) ) {
- *addrp = 0;
- return KERN_INVALID_ARGUMENT;
- }
- }
-
- map_size = vm_map_round_page(size);
+ map_size = vm_map_round_page(size,
+ VM_MAP_PAGE_MASK(map));
map_mask = (vm_map_offset_t) mask;
vm_alloc_flags = 0;
+ /* Check for zero allocation size (either directly, or via overflow when rounding size up to a page boundary) */
+ if (map_size == 0) {
+ *addrp = 0;
+ return KERN_INVALID_ARGUMENT;
+ }
/*
* limit the size of a single extent of wired memory
* to try and limit the damage to the system if
* too many pages get wired down
+ * limit raised to 2GB now that up to 128GB of physical memory is supported
*/
- if (map_size > (1 << 30)) {
+ if (map_size > (1ULL << 31)) {
return KERN_RESOURCE_SHORTAGE;
}
guard_page_list = mem;
}
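+ /*
+ * With KMA_VAONLY only the virtual range is set up here: no physical
+ * pages are grabbed, mapped or wired (the caller is expected to
+ * populate the range later, e.g. with kernel_memory_populate()).
+ */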
+ if (! (flags & KMA_VAONLY)) {
for (i = 0; i < wired_page_count; i++) {
uint64_t unavailable;
kr = KERN_RESOURCE_SHORTAGE;
goto out;
}
+ if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE;
if (unavailable > max_mem || map_size > (max_mem - unavailable)) {
mem->pageq.next = (queue_entry_t)wired_page_list;
wired_page_list = mem;
}
+ }
/*
* Allocate a new object (if necessary). We must do this before
if ((flags & KMA_KOBJECT) != 0) {
object = kernel_object;
vm_object_reference(object);
+ } else if ((flags & KMA_COMPRESSOR) != 0) {
+ object = compressor_object;
+ vm_object_reference(object);
} else {
object = vm_object_allocate(map_size);
}
}
entry->object.vm_object = object;
- entry->offset = offset = (object == kernel_object) ?
+ entry->offset = offset = (object == kernel_object || object == compressor_object) ?
map_addr : 0;
-
- entry->wired_count++;
+
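+ /* compressor-backed entries are not marked wired */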
+ if (object != compressor_object)
+ entry->wired_count++;
if (flags & KMA_PERMANENT)
entry->permanent = TRUE;
- if (object != kernel_object)
+ if (object != kernel_object && object != compressor_object)
vm_object_reference(object);
vm_object_lock(object);
mem->busy = FALSE;
pg_offset += PAGE_SIZE_64;
}
+
+ kma_prot = VM_PROT_READ | VM_PROT_WRITE;
+
+ if (flags & KMA_VAONLY) {
+ pg_offset = fill_start + fill_size;
+ } else {
for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) {
if (wired_page_list == NULL)
panic("kernel_memory_allocate: wired_page_list == NULL");
mem->pmapped = TRUE;
mem->wpmapped = TRUE;
- PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem,
- VM_PROT_READ | VM_PROT_WRITE, object->wimg_bits & VM_WIMG_MASK, TRUE);
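+ /*
+ * Try the pmap enter without waiting for memory while the object
+ * lock is held; on KERN_RESOURCE_SHORTAGE, drop the lock and
+ * retry with a blocking PMAP_ENTER.
+ */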
+ PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, mem,
+ kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+ PMAP_OPTIONS_NOWAIT, pe_result);
+
+ if (pe_result == KERN_RESOURCE_SHORTAGE) {
+ vm_object_unlock(object);
+
+ PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem,
+ kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE);
+
+ vm_object_lock(object);
+ }
+ if (flags & KMA_NOENCRYPT) {
+ bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);
+
+ pmap_set_noencrypt(mem->phys_page);
+ }
+ }
}
if ((fill_start + fill_size) < map_size) {
if (guard_page_list == NULL)
if (guard_page_list || wired_page_list)
panic("kernel_memory_allocate: non empty list\n");
+ if (! (flags & KMA_VAONLY)) {
vm_page_lockspin_queues();
vm_page_wire_count += wired_page_count;
vm_page_unlock_queues();
+ }
vm_object_unlock(object);
/*
* now that the pages are wired, we no longer have to fear coalesce
*/
- if (object == kernel_object)
+ if (object == kernel_object || object == compressor_object)
vm_map_simplify(map, map_addr);
else
vm_object_deallocate(object);
return kr;
}
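+
+/*
+ * kernel_memory_populate:
+ *
+ * Grab physical pages for an already reserved kernel virtual range and
+ * enter them in the kernel pmap.  The range must be backed by either
+ * kernel_object (KMA_KOBJECT) or compressor_object (KMA_COMPRESSOR);
+ * anything else panics.  In the KMA_KOBJECT case the pages are wired and
+ * KMA_LOMEM/KMA_NOPAGEWAIT are honored; KMA_COMPRESSOR pages are entered
+ * unwired.
+ *
+ * Illustrative pairing (a sketch, not taken from an actual caller; addr
+ * and size are the caller's): a range reserved with KMA_VAONLY can be
+ * filled in later, e.g.
+ *
+ *	kr = kernel_memory_allocate(kernel_map, &addr, size, 0,
+ *	                            KMA_KOBJECT | KMA_VAONLY);
+ *	if (kr == KERN_SUCCESS)
+ *		kr = kernel_memory_populate(kernel_map, addr, size,
+ *		                            KMA_KOBJECT);
+ */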
+kern_return_t
+kernel_memory_populate(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size,
+ int flags)
+{
+ vm_object_t object;
+ vm_object_offset_t offset, pg_offset;
+ kern_return_t kr, pe_result;
+ vm_page_t mem;
+ vm_page_t page_list = NULL;
+ int page_count = 0;
+ int i;
+
+ page_count = (int) (size / PAGE_SIZE_64);
+
+ assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
+
+ if (flags & KMA_COMPRESSOR) {
+
+ for (i = 0; i < page_count; i++) {
+ for (;;) {
+ mem = vm_page_grab();
+
+ if (mem != VM_PAGE_NULL)
+ break;
+
+ VM_PAGE_WAIT();
+ }
+ mem->pageq.next = (queue_entry_t) page_list;
+ page_list = mem;
+ }
+ offset = addr;
+ object = compressor_object;
+
+ vm_object_lock(object);
+
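+ /*
+ * Insert each grabbed page into compressor_object, enter it in
+ * the kernel pmap and mark it as a compressor page.
+ */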
+ for (pg_offset = 0;
+ pg_offset < size;
+ pg_offset += PAGE_SIZE_64) {
+
+ mem = page_list;
+ page_list = (vm_page_t) mem->pageq.next;
+ mem->pageq.next = NULL;
+
+ vm_page_insert(mem, object, offset + pg_offset);
+ assert(mem->busy);
+
+ PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem,
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
+ 0, TRUE, PMAP_OPTIONS_NOWAIT, pe_result);
+
+ if (pe_result == KERN_RESOURCE_SHORTAGE) {
+
+ vm_object_unlock(object);
+
+ PMAP_ENTER(kernel_pmap, addr + pg_offset, mem,
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
+
+ vm_object_lock(object);
+ }
+ mem->busy = FALSE;
+ mem->pmapped = TRUE;
+ mem->wpmapped = TRUE;
+ mem->compressor = TRUE;
+ }
+ vm_object_unlock(object);
+
+ return KERN_SUCCESS;
+ }
+
+ for (i = 0; i < page_count; i++) {
+ for (;;) {
+ if (flags & KMA_LOMEM)
+ mem = vm_page_grablo();
+ else
+ mem = vm_page_grab();
+
+ if (mem != VM_PAGE_NULL)
+ break;
+
+ if (flags & KMA_NOPAGEWAIT) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ if ((flags & KMA_LOMEM) &&
+ (vm_lopage_needed == TRUE)) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ VM_PAGE_WAIT();
+ }
+ mem->pageq.next = (queue_entry_t) page_list;
+ page_list = mem;
+ }
+ if (flags & KMA_KOBJECT) {
+ offset = addr;
+ object = kernel_object;
+
+ vm_object_lock(object);
+ } else {
+ /*
+ * If it's not the kernel object, we need to:
+ * lock map;
+ * lookup entry;
+ * lock object;
+ * take reference on object;
+ * unlock map;
+ */
+ panic("kernel_memory_populate(%p,0x%llx,0x%llx,0x%x): "
+ "!KMA_KOBJECT",
+ map, (uint64_t) addr, (uint64_t) size, flags);
+ }
+
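+ /*
+ * Wire each grabbed page, insert it into kernel_object and enter
+ * it in the kernel pmap.
+ */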
+ for (pg_offset = 0;
+ pg_offset < size;
+ pg_offset += PAGE_SIZE_64) {
+
+ if (page_list == NULL)
+ panic("kernel_memory_populate: page_list == NULL");
+
+ mem = page_list;
+ page_list = (vm_page_t) mem->pageq.next;
+ mem->pageq.next = NULL;
+
+ mem->wire_count++;
+
+ vm_page_insert(mem, object, offset + pg_offset);
+
+ mem->busy = FALSE;
+ mem->pmapped = TRUE;
+ mem->wpmapped = TRUE;
+
+ PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem,
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
+ ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+ PMAP_OPTIONS_NOWAIT, pe_result);
+
+ if (pe_result == KERN_RESOURCE_SHORTAGE) {
+
+ vm_object_unlock(object);
+
+ PMAP_ENTER(kernel_pmap, addr + pg_offset, mem,
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
+ ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE);
+
+ vm_object_lock(object);
+ }
+ if (flags & KMA_NOENCRYPT) {
+ bzero(CAST_DOWN(void *, (addr + pg_offset)), PAGE_SIZE);
+ pmap_set_noencrypt(mem->phys_page);
+ }
+ }
+ vm_page_lock_queues();
+ vm_page_wire_count += page_count;
+ vm_page_unlock_queues();
+
+ vm_object_unlock(object);
+
+ return KERN_SUCCESS;
+
+out:
+ if (page_list)
+ vm_page_free_list(page_list, FALSE);
+
+ return kr;
+}
+
+
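+/*
+ * kernel_memory_depopulate:
+ *
+ * Release the physical pages backing a range previously populated in
+ * kernel_object (KMA_KOBJECT) or compressor_object (KMA_COMPRESSOR):
+ * the pmap mappings are torn down and the pages freed, while the
+ * virtual range itself stays reserved.
+ */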
+void
+kernel_memory_depopulate(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size,
+ int flags)
+{
+ vm_object_t object;
+ vm_object_offset_t offset, pg_offset;
+ vm_page_t mem;
+ vm_page_t local_freeq = NULL;
+
+ assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
+
+ if (flags & KMA_COMPRESSOR) {
+ offset = addr;
+ object = compressor_object;
+
+ vm_object_lock(object);
+ } else if (flags & KMA_KOBJECT) {
+ offset = addr;
+ object = kernel_object;
+
+ vm_object_lock(object);
+ } else {
+ offset = 0;
+ object = NULL;
+ /*
+ * If it's not the kernel object, we need to:
+ * lock map;
+ * lookup entry;
+ * lock object;
+ * unlock map;
+ */
+ panic("kernel_memory_depopulate(%p,0x%llx,0x%llx,0x%x): "
+ "!KMA_KOBJECT",
+ map, (uint64_t) addr, (uint64_t) size, flags);
+ }
+ pmap_protect(kernel_map->pmap, offset, offset + size, VM_PROT_NONE);
+
+ for (pg_offset = 0;
+ pg_offset < size;
+ pg_offset += PAGE_SIZE_64) {
+
+ mem = vm_page_lookup(object, offset + pg_offset);
+
+ assert(mem);
+
+ pmap_disconnect(mem->phys_page);
+
+ mem->busy = TRUE;
+
+ assert(mem->tabled);
+ vm_page_remove(mem, TRUE);
+ assert(mem->busy);
+
+ assert(mem->pageq.next == NULL &&
+ mem->pageq.prev == NULL);
+ mem->pageq.next = (queue_entry_t)local_freeq;
+ local_freeq = mem;
+ }
+ vm_object_unlock(object);
+
+ if (local_freeq)
+ vm_page_free_list(local_freeq, TRUE);
+}
+
/*
* kmem_alloc:
*
vm_page_t mem;
kern_return_t kr;
- oldmapmin = vm_map_trunc_page(oldaddr);
- oldmapmax = vm_map_round_page(oldaddr + oldsize);
+ oldmapmin = vm_map_trunc_page(oldaddr,
+ VM_MAP_PAGE_MASK(map));
+ oldmapmax = vm_map_round_page(oldaddr + oldsize,
+ VM_MAP_PAGE_MASK(map));
oldmapsize = oldmapmax - oldmapmin;
- newmapsize = vm_map_round_page(newsize);
+ newmapsize = vm_map_round_page(newsize,
+ VM_MAP_PAGE_MASK(map));
/*
/* attempt is made to realloc a kmem_alloc'd area */
vm_object_lock(object);
vm_map_unlock(map);
- if (object->size != oldmapsize)
+ if (object->vo_size != oldmapsize)
panic("kmem_realloc");
- object->size = newmapsize;
+ object->vo_size = newmapsize;
vm_object_unlock(object);
/* allocate the new pages while expanded portion of the */
VM_PAGE_FREE(mem);
}
}
- object->size = oldmapsize;
+ object->vo_size = oldmapsize;
vm_object_unlock(object);
vm_object_deallocate(object);
return kr;
VM_PAGE_FREE(mem);
}
}
- object->size = oldmapsize;
+ object->vo_size = oldmapsize;
vm_object_unlock(object);
vm_object_deallocate(object);
return (kr);
#else
map_addr = vm_map_min(map);
#endif
- map_size = vm_map_round_page(size);
+ map_size = vm_map_round_page(size,
+ VM_MAP_PAGE_MASK(map));
kr = vm_map_enter(map, &map_addr, map_size,
(vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
return;
}
- kr = vm_map_remove(map, vm_map_trunc_page(addr),
- vm_map_round_page(addr + size),
- VM_MAP_REMOVE_KUNWIRE);
+ kr = vm_map_remove(map,
+ vm_map_trunc_page(addr,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(addr + size,
+ VM_MAP_PAGE_MASK(map)),
+ VM_MAP_REMOVE_KUNWIRE);
if (kr != KERN_SUCCESS)
panic("kmem_free");
}
/*
* Mark the pmap region as not pageable.
*/
- map_start = vm_map_trunc_page(start);
- map_end = vm_map_round_page(end);
+ map_start = vm_map_trunc_page(start,
+ VM_MAP_PAGE_MASK(kernel_map));
+ map_end = vm_map_round_page(end,
+ VM_MAP_PAGE_MASK(kernel_map));
pmap_pageable(kernel_pmap, map_start, map_end, FALSE);
mem->pmapped = TRUE;
mem->wpmapped = TRUE;
- PMAP_ENTER(kernel_pmap, map_start, mem, protection,
- ((unsigned int)(mem->object->wimg_bits))
- & VM_WIMG_MASK,
- TRUE);
+ PMAP_ENTER(kernel_pmap, map_start, mem, protection, VM_PROT_NONE, 0, TRUE);
map_start += PAGE_SIZE;
offset += PAGE_SIZE;
vm_map_size_t map_size;
kern_return_t kr;
- map_size = vm_map_round_page(size);
+ map_size = vm_map_round_page(size,
+ VM_MAP_PAGE_MASK(parent));
/*
* Need reference on submap object because it is internal
*/
vm_object_reference(vm_submap_object);
- map_addr = (flags & VM_FLAGS_ANYWHERE) ?
- vm_map_min(parent) : vm_map_trunc_page(*addr);
+ map_addr = ((flags & VM_FLAGS_ANYWHERE)
+ ? vm_map_min(parent)
+ : vm_map_trunc_page(*addr,
+ VM_MAP_PAGE_MASK(parent)));
kr = vm_map_enter(parent, &map_addr, map_size,
(vm_map_offset_t) 0, flags,
map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
if (map == VM_MAP_NULL)
panic("kmem_suballoc: vm_map_create failed"); /* "can't happen" */
+ /* inherit the parent map's page size */
+ vm_map_set_page_shift(map, VM_MAP_PAGE_SHIFT(parent));
kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
if (kr != KERN_SUCCESS) {
vm_map_offset_t map_start;
vm_map_offset_t map_end;
- map_start = vm_map_trunc_page(start);
- map_end = vm_map_round_page(end);
+ map_start = vm_map_trunc_page(start,
+ VM_MAP_PAGE_MASK(kernel_map));
+ map_end = vm_map_round_page(end,
+ VM_MAP_PAGE_MASK(kernel_map));
- kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_ADDRESS,
+ kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_AND_KEXT_ADDRESS,
map_end, FALSE);
/*
* Reserve virtual memory allocated up to this time.
*/
- if (start != VM_MIN_KERNEL_ADDRESS) {
+ if (start != VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
vm_map_offset_t map_addr;
+ kern_return_t kr;
- map_addr = VM_MIN_KERNEL_ADDRESS;
- (void) vm_map_enter(kernel_map,
- &map_addr,
- (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
- (vm_map_offset_t) 0,
- VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
- VM_OBJECT_NULL,
- (vm_object_offset_t) 0, FALSE,
- VM_PROT_NONE, VM_PROT_NONE,
- VM_INHERIT_DEFAULT);
+ map_addr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
+ kr = vm_map_enter(kernel_map,
+ &map_addr,
+ (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
+ (vm_map_offset_t) 0,
+ VM_FLAGS_FIXED | VM_FLAGS_NO_PMAP_CHECK,
+ VM_OBJECT_NULL,
+ (vm_object_offset_t) 0, FALSE,
+ VM_PROT_NONE, VM_PROT_NONE,
+ VM_INHERIT_DEFAULT);
+
+ if (kr != KERN_SUCCESS) {
+ panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
+ (uint64_t) start, (uint64_t) end,
+ (uint64_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS,
+ (uint64_t) (map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
+ kr);
+ }
}
-
- /*
- * Account for kernel memory (text, data, bss, vm shenanigans).
- * This may include inaccessible "holes" as determined by what
- * the machine-dependent init code includes in max_mem.
- */
- assert(atop_64(max_mem) == (unsigned int) atop_64(max_mem));
- vm_page_wire_count = ((unsigned int) atop_64(max_mem) -
- (vm_page_free_count +
- vm_page_active_count +
- vm_page_inactive_count));
-
/*
* Set the default global user wire limit which limits the amount of
* memory that can be locked via mlock(). We set this to the total
obj = entry->object.vm_object;
obj_off = (off - entry->vme_start) + entry->offset;
while(obj->shadow) {
- obj_off += obj->shadow_offset;
+ obj_off += obj->vo_shadow_offset;
obj = obj->shadow;
}
if((obj->pager_created) && (obj->pager == pager)) {