]> git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/vm/vm_kern.c
xnu-792.13.8.tar.gz
[apple/xnu.git] / osfmk / vm / vm_kern.c
index 8aa7177fb8a44951ef25161e370bdb8f67b5e2bf..68f203028ea8809e08703e08f4ede8ddf7e2abd5 100644 (file)
@@ -141,7 +141,7 @@ kmem_alloc_contig(
                object = vm_object_allocate(map_size);
        }
 
-       kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
+       kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
        if (KERN_SUCCESS != kr) {
                vm_object_deallocate(object);
                return kr;
@@ -169,6 +169,7 @@ kmem_alloc_contig(
        for (i = 0; i < map_size; i += PAGE_SIZE) {
                m = pages;
                pages = NEXT_PAGE(m);
+               *(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
                m->busy = FALSE;
                vm_page_insert(m, object, offset + i);
        }
@@ -207,6 +208,10 @@ kmem_alloc_contig(
  *               KMA_HERE              *addrp is base address, else "anywhere"
  *               KMA_NOPAGEWAIT        don't wait for pages if unavailable
  *               KMA_KOBJECT           use kernel_object
+ *               KMA_LOMEM             support for 32-bit devices in a 64-bit world;
+ *                                     if set and a low-memory pool is available,
+ *                                     grab pages from it. This also implies
+ *                                     KMA_NOPAGEWAIT.
  */
 
 kern_return_t
@@ -230,6 +235,12 @@ kernel_memory_allocate(
                *addrp = 0;
                return KERN_INVALID_ARGUMENT;
        }
+       if (flags & KMA_LOMEM) {
+               if ( !(flags & KMA_NOPAGEWAIT) ) {
+                       *addrp = 0;
+                       return KERN_INVALID_ARGUMENT;
+               }
+       }
 
        map_size = vm_map_round_page(size);
        map_mask = (vm_map_offset_t) mask;
@@ -245,12 +256,11 @@ kernel_memory_allocate(
                object = vm_object_allocate(map_size);
        }
 
-       kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
+       kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
        if (KERN_SUCCESS != kr) {
                vm_object_deallocate(object);
                return kr;
        }
-
        entry->object.vm_object = object;
        entry->offset = offset = (object == kernel_object) ? 
                        map_addr - VM_MIN_KERNEL_ADDRESS : 0;
@@ -262,8 +272,15 @@ kernel_memory_allocate(
        for (i = 0; i < map_size; i += PAGE_SIZE) {
                vm_page_t       mem;
 
-               while (VM_PAGE_NULL == 
-                      (mem = vm_page_alloc(object, offset + i))) {
+               for (;;) {
+                       if (flags & KMA_LOMEM)
+                               mem = vm_page_alloclo(object, offset + i);
+                       else
+                               mem = vm_page_alloc(object, offset + i);
+
+                       if (mem != VM_PAGE_NULL)
+                               break;
+
                        if (flags & KMA_NOPAGEWAIT) {
                                if (object == kernel_object)
                                        vm_object_page_remove(object, offset, offset + i);
@@ -391,7 +408,7 @@ kmem_realloc(
         */
 
        kr = vm_map_find_space(map, &newmapaddr, newmapsize,
-                              (vm_map_offset_t) 0, &newentry);
+                              (vm_map_offset_t) 0, 0, &newentry);
        if (kr != KERN_SUCCESS) {
                vm_object_lock(object);
                for(offset = oldmapsize;