extern kern_return_t kmem_alloc_pages(
register vm_object_t object,
register vm_object_offset_t offset,
- register vm_offset_t start,
- register vm_offset_t end,
- vm_prot_t protection);
+ register vm_size_t size);
extern void kmem_remap_pages(
register vm_object_t object,
/*
* Since we have not given out this address yet,
- * it is safe to unlock the map.
+	 * it is safe to unlock the map. Except, of course,
+	 * we must make certain that no one coalesces our address
+	 * or does a blind vm_deallocate and removes the object;
+	 * an extra object reference will suffice to protect
+	 * against both contingencies.
*/
+ vm_object_reference(object);
vm_map_unlock(map);
vm_object_lock(object);
offset + (vm_object_offset_t)i);
vm_object_unlock(object);
vm_map_remove(map, addr, addr + size, 0);
+ vm_object_deallocate(object);
return KERN_RESOURCE_SHORTAGE;
}
vm_object_unlock(object);
vm_object_unlock(object);
}
vm_map_remove(map, addr, addr + size, 0);
+ vm_object_deallocate(object);
return (kr);
}
+	/* now that the pages are wired, we no longer have to fear coalescing */
+ vm_object_deallocate(object);
if (object == kernel_object)
vm_map_simplify(map, addr);
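Taken together, the hunks above implement the protocol the new comment describes. A condensed sketch, for illustration only (not part of the patch); the page allocation and wiring in the middle are elided:

	vm_object_reference(object);	/* pin the object across the unlock */
	vm_map_unlock(map);		/* safe: the address is not yet handed out */

	/* ... allocate and wire the pages backing [addr, addr + size);   */
	/* on failure, vm_map_remove() the range, drop the extra          */
	/* reference and return, as in the error paths above ...          */

	vm_object_deallocate(object);	/* wired now; coalescing is harmless */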
vm_offset_t *newaddrp,
vm_size_t newsize)
{
- vm_offset_t oldmin, oldmax;
- vm_offset_t newaddr;
- vm_object_t object;
- vm_map_entry_t oldentry, newentry;
- kern_return_t kr;
+ vm_offset_t oldmin, oldmax;
+ vm_offset_t newaddr;
+ vm_offset_t offset;
+ vm_object_t object;
+ vm_map_entry_t oldentry, newentry;
+ vm_page_t mem;
+ kern_return_t kr;
oldmin = trunc_page(oldaddr);
oldmax = round_page(oldaddr + oldsize);
oldsize = oldmax - oldmin;
newsize = round_page(newsize);
- /*
- * Find space for the new region.
- */
-
- kr = vm_map_find_space(map, &newaddr, newsize, (vm_offset_t) 0,
- &newentry);
- if (kr != KERN_SUCCESS) {
- return kr;
- }
/*
* Find the VM object backing the old region.
*/
+ vm_map_lock(map);
+
if (!vm_map_lookup_entry(map, oldmin, &oldentry))
panic("kmem_realloc");
object = oldentry->object.vm_object;
*/
vm_object_reference(object);
+	/*
+	 * By grabbing the object lock before unlocking the map we
+	 * guarantee that we will panic if more than one attempt is
+	 * made to realloc a kmem_alloc'd area.
+	 */
vm_object_lock(object);
+ vm_map_unlock(map);
if (object->size != oldsize)
panic("kmem_realloc");
object->size = newsize;
vm_object_unlock(object);
- newentry->object.vm_object = object;
- newentry->offset = 0;
- assert (newentry->wired_count == 0);
- newentry->wired_count = 1;
+	/*
+	 * Allocate the new pages while the expanded portion of the
+	 * object is still unmapped.
+	 */
+ kmem_alloc_pages(object, oldsize, newsize-oldsize);
+
/*
- * Since we have not given out this address yet,
- * it is safe to unlock the map. We are trusting
- * that nobody will play with either region.
+ * Find space for the new region.
*/
+ kr = vm_map_find_space(map, &newaddr, newsize, (vm_offset_t) 0,
+ &newentry);
+ if (kr != KERN_SUCCESS) {
+ vm_object_lock(object);
+		for (offset = oldsize; offset < newsize; offset += PAGE_SIZE) {
+ if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
+ vm_page_lock_queues();
+ vm_page_free(mem);
+ vm_page_unlock_queues();
+ }
+ }
+ object->size = oldsize;
+ vm_object_unlock(object);
+ vm_object_deallocate(object);
+ return kr;
+ }
+ newentry->object.vm_object = object;
+ newentry->offset = 0;
+ assert (newentry->wired_count == 0);
+
+
+	/*
+	 * Add an extra reference in case someone does an
+	 * unexpected deallocate.
+	 */
+ vm_object_reference(object);
vm_map_unlock(map);
- /*
- * Remap the pages in the old region and
- * allocate more pages for the new region.
- */
+ if ((kr = vm_map_wire(map, newaddr, newaddr + newsize,
+ VM_PROT_DEFAULT, FALSE)) != KERN_SUCCESS) {
+ vm_map_remove(map, newaddr, newaddr + newsize, 0);
+ vm_object_lock(object);
+		for (offset = oldsize; offset < newsize; offset += PAGE_SIZE) {
+ if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
+ vm_page_lock_queues();
+ vm_page_free(mem);
+ vm_page_unlock_queues();
+ }
+ }
+ object->size = oldsize;
+ vm_object_unlock(object);
+ vm_object_deallocate(object);
+ return (kr);
+ }
+ vm_object_deallocate(object);
- kmem_remap_pages(object, 0,
- newaddr, newaddr + oldsize,
- VM_PROT_DEFAULT);
- kmem_alloc_pages(object, oldsize,
- newaddr + oldsize, newaddr + newsize,
- VM_PROT_DEFAULT);
*newaddrp = newaddr;
return KERN_SUCCESS;
}
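For illustration, a hedged sketch of how a caller might use the reworked kmem_realloc(); the map and size variables here are placeholders, not part of the patch:

	vm_map_t	alloc_map;	/* some kernel submap (placeholder) */
	vm_offset_t	oldaddr;	/* region obtained from kmem_alloc() */
	vm_size_t	oldsize, newsize;
	vm_offset_t	newaddr;
	kern_return_t	kr;

	kr = kmem_realloc(alloc_map, oldaddr, oldsize, &newaddr, newsize);
	if (kr != KERN_SUCCESS)
		return kr;		/* the old region is left untouched */

	/*
	 * Both regions now map the same VM object, so the first oldsize
	 * bytes at newaddr alias the data at oldaddr and no copy is needed.
	 * Disposing of the old mapping remains the caller's business.
	 */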
/*
- * Allocate new wired pages in an object.
- * The object is assumed to be mapped into the kernel map or
- * a submap.
+ * Allocate new pages in an object.
*/
kern_return_t
kmem_alloc_pages(
register vm_object_t object,
register vm_object_offset_t offset,
- register vm_offset_t start,
- register vm_offset_t end,
- vm_prot_t protection)
+ register vm_size_t size)
{
- /*
- * Mark the pmap region as not pageable.
- */
- pmap_pageable(kernel_pmap, start, end, FALSE);
- while (start < end) {
+ size = round_page(size);
+ vm_object_lock(object);
+ while (size) {
register vm_page_t mem;
- vm_object_lock(object);
/*
* Allocate a page
vm_object_lock(object);
}
- /*
- * Wire it down
- */
- vm_page_lock_queues();
- vm_page_wire(mem);
- vm_page_unlock_queues();
- vm_object_unlock(object);
-
- /*
- * Enter it in the kernel pmap
- */
- PMAP_ENTER(kernel_pmap, start, mem,
- protection, TRUE);
- vm_object_lock(object);
- PAGE_WAKEUP_DONE(mem);
- vm_object_unlock(object);
-
- start += PAGE_SIZE;
- offset += PAGE_SIZE_64;
+ offset += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ mem->busy = FALSE;
}
+ vm_object_unlock(object);
return KERN_SUCCESS;
}
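A hedged sketch of the calling pattern the narrowed interface expects, mirroring kmem_realloc() above: populate the object first, then let vm_map_wire() do the wiring and pmap work once the range is mapped. The addr and growth variables are placeholders:

	/* fill [oldsize, oldsize + growth) of the object with fresh pages */
	kmem_alloc_pages(object, (vm_object_offset_t)oldsize, growth);

	/* ... map the object at addr (vm_map_find_space() plus entry      */
	/* setup, as kmem_realloc() does) ...                               */

	/* wiring through the map faults the pages in, wires them and      */
	/* enters them in the pmap, replacing the old in-line PMAP_ENTERs  */
	kr = vm_map_wire(map, addr, addr + oldsize + growth,
			 VM_PROT_DEFAULT, FALSE);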
* Enter it in the kernel pmap. The page isn't busy,
* but this shouldn't be a problem because it is wired.
*/
- PMAP_ENTER(kernel_pmap, start, mem,
- protection, TRUE);
+ PMAP_ENTER(kernel_pmap, start, mem, protection,
+ VM_WIMG_USE_DEFAULT, TRUE);
start += PAGE_SIZE;
offset += PAGE_SIZE;
vm_object_deallocate(vm_submap_object);
return (kr);
}
-
*new_map = map;
return (KERN_SUCCESS);
}
+ vm_page_inactive_count));
}
-/*
- * kmem_io_map_copyout:
- *
- * Establish temporary mapping in designated map for the memory
- * passed in. Memory format must be a page_list vm_map_copy.
- */
-
-kern_return_t
-kmem_io_map_copyout(
- vm_map_t map,
- vm_offset_t *addr, /* actual addr of data */
- vm_size_t *alloc_size, /* size allocated */
- vm_map_copy_t copy,
- vm_size_t min_size, /* Do at least this much */
- vm_prot_t prot) /* Protection of mapping */
-{
- vm_offset_t myaddr, offset;
- vm_size_t mysize, copy_size;
- kern_return_t ret;
- register
- vm_page_t *page_list;
- vm_map_copy_t new_copy;
- register
- int i;
-
- assert(copy->type == VM_MAP_COPY_PAGE_LIST);
- assert(min_size != 0);
-
- /*
- * Figure out the size in vm pages.
- */
- min_size += (vm_size_t)(copy->offset - trunc_page_64(copy->offset));
- min_size = round_page(min_size);
- mysize = (vm_size_t)(round_page_64(
- copy->offset + (vm_object_offset_t)copy->size) -
- trunc_page_64(copy->offset));
-
- /*
- * If total size is larger than one page list and
- * we don't have to do more than one page list, then
- * only do one page list.
- *
- * XXX Could be much smarter about this ... like trimming length
- * XXX if we need more than one page list but not all of them.
- */
-
- copy_size = ptoa(copy->cpy_npages);
- if (mysize > copy_size && copy_size > min_size)
- mysize = copy_size;
-
- /*
- * Allocate some address space in the map (must be kernel
- * space).
- */
- myaddr = vm_map_min(map);
- ret = vm_map_enter(map, &myaddr, mysize,
- (vm_offset_t) 0, TRUE,
- VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
- prot, prot, VM_INHERIT_DEFAULT);
-
- if (ret != KERN_SUCCESS)
- return(ret);
-
- /*
- * Tell the pmap module that this will be wired, and
- * enter the mappings.
- */
- pmap_pageable(vm_map_pmap(map), myaddr, myaddr + mysize, TRUE);
-
- *addr = myaddr + (vm_offset_t)
- (copy->offset - trunc_page_64(copy->offset));
- *alloc_size = mysize;
-
- offset = myaddr;
-	page_list = &copy->cpy_page_list[0];
- while (TRUE) {
- for ( i = 0; i < copy->cpy_npages; i++, offset+=PAGE_SIZE_64) {
- PMAP_ENTER(vm_map_pmap(map),
- (vm_offset_t)offset, *page_list,
- prot, TRUE);
- page_list++;
- }
-
- if (offset == (myaddr + mysize))
- break;
-
- /*
- * Onward to the next page_list. The extend_cont
- * leaves the current page list's pages alone;
- * they'll be cleaned up at discard. Reset this
- * copy's continuation to discard the next one.
- */
- vm_map_copy_invoke_extend_cont(copy, &new_copy, &ret);
-
- if (ret != KERN_SUCCESS) {
- kmem_io_map_deallocate(map, myaddr, mysize);
- return(ret);
- }
- copy->cpy_cont = vm_map_copy_discard_cont;
- copy->cpy_cont_args = (vm_map_copyin_args_t) new_copy;
- assert(new_copy != VM_MAP_COPY_NULL);
- assert(new_copy->type == VM_MAP_COPY_PAGE_LIST);
- copy = new_copy;
-		page_list = &copy->cpy_page_list[0];
- }
-
- return(ret);
-}
-
-/*
- * kmem_io_map_deallocate:
- *
- * Get rid of the mapping established by kmem_io_map_copyout.
- * Assumes that addr and size have been rounded to page boundaries.
- */
-
-void
-kmem_io_map_deallocate(
- vm_map_t map,
- vm_offset_t addr,
- vm_size_t size)
-{
-
- register vm_offset_t va, end;
-
- end = round_page(addr + size);
- for (va = trunc_page(addr); va < end; va += PAGE_SIZE)
- pmap_change_wiring(vm_map_pmap(map), va, FALSE);
-
- /*
- * Remove the mappings. The pmap_remove is needed.
- */
-
- pmap_remove(vm_map_pmap(map), addr, addr + size);
- vm_map_remove(map, addr, addr + size, VM_MAP_REMOVE_KUNWIRE);
-}
-
/*
* kmem_io_object_trunc:
return TRUE;
}
+
+
+kern_return_t
+vm_conflict_check(
+ vm_map_t map,
+ vm_offset_t off,
+ vm_size_t len,
+ memory_object_t pager,
+ vm_object_offset_t file_off)
+{
+ vm_map_entry_t entry;
+ vm_object_t obj;
+ vm_object_offset_t obj_off;
+ vm_map_t base_map;
+ vm_offset_t base_offset;
+ vm_offset_t original_offset;
+ kern_return_t kr;
+ vm_size_t local_len;
+
+ base_map = map;
+ base_offset = off;
+ original_offset = off;
+ kr = KERN_SUCCESS;
+ vm_map_lock(map);
+ while(vm_map_lookup_entry(map, off, &entry)) {
+ local_len = len;
+
+ if (entry->object.vm_object == VM_OBJECT_NULL) {
+ vm_map_unlock(map);
+ return KERN_SUCCESS;
+ }
+ if (entry->is_sub_map) {
+ vm_map_t old_map;
+ old_map = map;
+ vm_map_lock(entry->object.sub_map);
+ map = entry->object.sub_map;
+ off = entry->offset + (off - entry->vme_start);
+ vm_map_unlock(old_map);
+ continue;
+ }
+ obj = entry->object.vm_object;
+ obj_off = (off - entry->vme_start) + entry->offset;
+ while(obj->shadow) {
+ obj_off += obj->shadow_offset;
+ obj = obj->shadow;
+ }
+ if((obj->pager_created) && (obj->pager == pager)) {
+ if(((obj->paging_offset) + obj_off) == file_off) {
+ if(off != base_offset) {
+ vm_map_unlock(map);
+ return KERN_FAILURE;
+ }
+ kr = KERN_ALREADY_WAITING;
+ } else if(
+ ((file_off < ((obj->paging_offset) + obj_off)) &&
+ ((file_off + len) >
+ ((obj->paging_offset) + obj_off))) ||
+ ((file_off > ((obj->paging_offset) + obj_off)) &&
+ (((((obj->paging_offset) + obj_off)) + len)
+ > file_off))) {
+ vm_map_unlock(map);
+ return KERN_FAILURE;
+ }
+		} else if(kr != KERN_SUCCESS) {
+			vm_map_unlock(map);
+			return KERN_FAILURE;
+		}
+
+ if(len < ((entry->vme_end - entry->vme_start) -
+ (off - entry->vme_start))) {
+ vm_map_unlock(map);
+ return kr;
+ } else {
+ len -= (entry->vme_end - entry->vme_start) -
+ (off - entry->vme_start);
+ }
+ base_offset = base_offset + (local_len - len);
+ file_off = file_off + (local_len - len);
+ off = base_offset;
+ if(map != base_map) {
+ vm_map_unlock(map);
+ vm_map_lock(base_map);
+ map = base_map;
+ }
+ }
+
+ vm_map_unlock(map);
+ return kr;
+}
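For illustration, a hedged sketch of how a caller might interpret vm_conflict_check()'s results before establishing a new file mapping; target_map, uaddr, size, file_pager and file_offset are placeholders:

	switch (vm_conflict_check(target_map, uaddr, size,
				  file_pager, file_offset)) {
	case KERN_SUCCESS:
		/* no existing mapping of this pager in the range: proceed */
		break;
	case KERN_ALREADY_WAITING:
		/* uaddr already maps exactly this file offset */
		break;
	default:	/* KERN_FAILURE */
		/* a conflicting mapping of the file exists: refuse, or    */
		/* retry at a different address                            */
		return KERN_FAILURE;
	}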