		object = vm_object_allocate(map_size);
	}
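	/*
	 * vm_map_find_space() now takes an additional argument (passed as 0
	 * at every call site in this patch) ahead of the returned entry
	 * pointer; its meaning is not shown here and is presumably a new
	 * flags parameter.
	 */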
-	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
+	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
+		*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
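	/*
	 * The loop above takes each page off the chain of (presumably
	 * physically contiguous) pages handed back earlier in this routine,
	 * clears its NEXT_PAGE link so it no longer points into the chain,
	 * marks it not busy, and enters it into the object at successive
	 * page-aligned offsets.
	 */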
 *	KMA_HERE	*addrp is base address, else "anywhere"
 *	KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *	KMA_KOBJECT	use kernel_object
+ *	KMA_LOMEM	support for 32-bit devices in a 64-bit world;
+ *			if set and a low-memory pool is available,
+ *			grab pages from it... this also implies
+ *			KMA_NOPAGEWAIT
 */
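/*
 * Illustrative use only: the routine name and prototype below are
 * assumed (the KMA_ flag names suggest kernel_memory_allocate()).
 * A caller needing pages usable by a 32-bit device would pass
 * KMA_LOMEM together with KMA_NOPAGEWAIT, e.g.:
 *
 *	kr = kernel_memory_allocate(kernel_map, &addr, size, 0,
 *			KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT);
 */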
kern_return_t
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}
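+	/*
+	 * Reject KMA_LOMEM without KMA_NOPAGEWAIT up front: as the flag
+	 * description notes, KMA_LOMEM implies KMA_NOPAGEWAIT, so callers
+	 * must pass both.
+	 */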
+	if (flags & KMA_LOMEM) {
+		if ( !(flags & KMA_NOPAGEWAIT) ) {
+			*addrp = 0;
+			return KERN_INVALID_ARGUMENT;
+		}
+	}
	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t) mask;
		object = vm_object_allocate(map_size);
	}
-	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
+	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}
-
	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
		map_addr - VM_MIN_KERNEL_ADDRESS : 0;
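	/*
	 * When backed by kernel_object, the page offset is the allocation's
	 * offset within the kernel's address range (map_addr minus
	 * VM_MIN_KERNEL_ADDRESS); a freshly allocated object starts at
	 * offset 0.
	 */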
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		vm_page_t mem;
-		while (VM_PAGE_NULL ==
-		       (mem = vm_page_alloc(object, offset + i))) {
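+		/*
+		 * Replaces the old vm_page_alloc() wait loop: with KMA_LOMEM
+		 * the page comes from vm_page_alloclo(), presumably the
+		 * low-memory pool allocator; otherwise vm_page_alloc() is
+		 * used as before.  A NULL result drops into the failure
+		 * handling below.
+		 */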
+		for (;;) {
+			if (flags & KMA_LOMEM)
+				mem = vm_page_alloclo(object, offset + i);
+			else
+				mem = vm_page_alloc(object, offset + i);
+
+			if (mem != VM_PAGE_NULL)
+				break;
+
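			/*
			 * No page was available.  With KMA_NOPAGEWAIT set
			 * (always the case for KMA_LOMEM), back out: pages
			 * already entered into kernel_object are removed
			 * here, and the allocation presumably fails back to
			 * the caller.
			 */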
			if (flags & KMA_NOPAGEWAIT) {
				if (object == kernel_object)
					vm_object_page_remove(object, offset, offset + i);
*/
	kr = vm_map_find_space(map, &newmapaddr, newmapsize,
-			(vm_map_offset_t) 0, &newentry);
+			(vm_map_offset_t) 0, 0, &newentry);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		for(offset = oldmapsize;