return KERN_INVALID_ARGUMENT;
}
- size = round_page(size);
+ size = round_page_32(size);
if ((flags & KMA_KOBJECT) == 0) {
object = vm_object_allocate(size);
kr = vm_map_find_space(map, &addr, size, mask, &entry);
vm_offset_t i;
kern_return_t kr;
- size = round_page(size);
+ size = round_page_32(size);
if ((flags & KMA_KOBJECT) == 0) {
/*
* Allocate a new object. We must do this before locking
vm_page_t mem;
kern_return_t kr;
- oldmin = trunc_page(oldaddr);
- oldmax = round_page(oldaddr + oldsize);
+ oldmin = trunc_page_32(oldaddr);
+ oldmax = round_page_32(oldaddr + oldsize);
oldsize = oldmax - oldmin;
- newsize = round_page(newsize);
+ newsize = round_page_32(newsize);
/*
#else
addr = vm_map_min(map);
#endif
- kr = vm_map_enter(map, &addr, round_page(size),
+ kr = vm_map_enter(map, &addr, round_page_32(size),
(vm_offset_t) 0, TRUE,
VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
{
kern_return_t kr;
- kr = vm_map_remove(map, trunc_page(addr),
- round_page(addr + size), VM_MAP_REMOVE_KUNWIRE);
+ kr = vm_map_remove(map, trunc_page_32(addr),
+ round_page_32(addr + size),
+ VM_MAP_REMOVE_KUNWIRE);
if (kr != KERN_SUCCESS)
panic("kmem_free");
}
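For reference, the *_32 page helpers that the hunks above switch to simply round an address up (round_page_32) or truncate it down (trunc_page_32) to a page boundary using 32-bit arithmetic. A minimal sketch, assuming 4 KB pages; PAGE_SIZE/PAGE_MASK here are placeholders and the exact macro text in osfmk/mach/vm_param.h may differ:

#include <stdint.h>

#define PAGE_SIZE 4096u                 /* assumption: 4 KB pages */
#define PAGE_MASK (PAGE_SIZE - 1)

/* Sketch only: round up / truncate down to a page boundary in 32 bits. */
#define round_page_32(x) ((((uint32_t)(x)) + PAGE_MASK) & ~((uint32_t)PAGE_MASK))
#define trunc_page_32(x) (((uint32_t)(x)) & ~((uint32_t)PAGE_MASK))

So in the kmem_realloc hunk above, trunc_page_32(oldaddr) backs up to the start of the first page and round_page_32(oldaddr + oldsize) extends to the end of the last partial page.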
register vm_size_t size)
{
- size = round_page(size);
+ size = round_page_32(size);
vm_object_lock(object);
while (size) {
register vm_page_t mem;
* but this shouldn't be a problem because it is wired.
*/
PMAP_ENTER(kernel_pmap, start, mem, protection,
- VM_WIMG_USE_DEFAULT, TRUE);
+ ((unsigned int)(mem->object->wimg_bits))
+ & VM_WIMG_MASK,
+ TRUE);
start += PAGE_SIZE;
offset += PAGE_SIZE;
vm_map_t map;
kern_return_t kr;
- size = round_page(size);
+ size = round_page_32(size);
/*
* Need reference on submap object because it is internal
/*
* Account for kernel memory (text, data, bss, vm shenanigans).
* This may include inaccessible "holes" as determined by what
- * the machine-dependent init code includes in mem_size.
+ * the machine-dependent init code includes in max_mem.
*/
- vm_page_wire_count = (atop(mem_size) - (vm_page_free_count
+ vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
				+ vm_page_active_count
				+ vm_page_inactive_count));
}
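The kmem_init hunk above recomputes the wired-page count from the 64-bit max_mem rather than the 32-bit mem_size. A self-contained sketch of the same accounting, with hypothetical names (wired_page_count, PAGE_SIZE_SKETCH) standing in for the kernel globals; atop_64() in the kernel converts a byte count to a page count, as the division here does:

#include <stdint.h>

#define PAGE_SIZE_SKETCH 4096u  /* assumption: 4 KB pages */

/* Sketch: physical pages that are not free, active, or inactive are
 * accounted as wired, computed from the 64-bit total-memory byte count. */
static uint32_t
wired_page_count(uint64_t max_mem_bytes,
                 uint32_t free, uint32_t active, uint32_t inactive)
{
        uint64_t total_pages = max_mem_bytes / PAGE_SIZE_SKETCH;

        return (uint32_t)(total_pages - (free + active + inactive));
}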
old_size = (vm_size_t)round_page_64(copy->size);
copy->size = new_size;
- new_size = round_page(new_size);
+ new_size = round_page_32(new_size);
vm_object_lock(copy->cpy_object);
vm_object_page_remove(copy->cpy_object,
}
if (entry->is_sub_map) {
vm_map_t old_map;
+
old_map = map;
vm_map_lock(entry->object.sub_map);
map = entry->object.sub_map;
return KERN_FAILURE;
}
kr = KERN_ALREADY_WAITING;
- } else if(
- ((file_off < ((obj->paging_offset) + obj_off)) &&
- ((file_off + len) >
- ((obj->paging_offset) + obj_off))) ||
- ((file_off > ((obj->paging_offset) + obj_off)) &&
- (((((obj->paging_offset) + obj_off)) + len)
- > file_off))) {
- vm_map_unlock(map);
- return KERN_FAILURE;
+ } else {
+ vm_object_offset_t obj_off_aligned;
+ vm_object_offset_t file_off_aligned;
+
+ obj_off_aligned = obj_off & ~PAGE_MASK;
+ file_off_aligned = file_off & ~PAGE_MASK;
+
+ if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
+ /*
+ * the target map and the file offset start in the same page
+ * but are not identical...
+ */
+ vm_map_unlock(map);
+ return KERN_FAILURE;
+ }
+ if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
+ ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
+ /*
+ * some portion of the tail of the I/O will fall
+ * within the encompass of the target map
+ */
+ vm_map_unlock(map);
+ return KERN_FAILURE;
+ }
+ if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
+ (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
+ /*
+ * the beginning page of the file offset falls within
+ * the target map's encompass
+ */
+ vm_map_unlock(map);
+ return KERN_FAILURE;
+ }
}
} else if(kr != KERN_SUCCESS) {
+ vm_map_unlock(map);
return KERN_FAILURE;
}
- if(len < ((entry->vme_end - entry->vme_start) -
+ if(len <= ((entry->vme_end - entry->vme_start) -
(off - entry->vme_start))) {
vm_map_unlock(map);
return kr;
vm_map_unlock(map);
return kr;
-
-
}
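The rewritten conflict test works on page-aligned offsets and rejects three overlap cases. A stand-alone sketch of the same logic, with hypothetical names (io_range_conflicts, paging_offset passed as a plain integer rather than read from the object), assuming the caller has already handled the exactly-identical case that the earlier branch maps to KERN_ALREADY_WAITING:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_MASK_SKETCH 0xFFFull   /* assumption: 4 KB pages */

/* Sketch only: mirrors the three checks added in the hunk above. */
static bool
io_range_conflicts(uint64_t paging_offset, uint64_t obj_off,
                   uint64_t file_off, uint64_t len)
{
        uint64_t obj_off_aligned  = obj_off  & ~PAGE_MASK_SKETCH;
        uint64_t file_off_aligned = file_off & ~PAGE_MASK_SKETCH;

        /* same starting page, but the offsets themselves are not identical */
        if (file_off_aligned == paging_offset + obj_off_aligned)
                return true;

        /* the tail of the I/O overlaps the start of the mapped range */
        if (file_off < paging_offset + obj_off_aligned &&
            file_off + len > paging_offset + obj_off_aligned)
                return true;

        /* the first page of the file offset lands inside the mapped range */
        if (file_off_aligned > paging_offset + obj_off &&
            file_off_aligned < paging_offset + obj_off + len)
                return true;

        return false;
}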