diff --git a/osfmk/vm/vm_kern.c b/osfmk/vm/vm_kern.c
index d5ed1f659d2e4e6d543d799e65c3798bfc966a3c..f8b306855d61ae073847617ca059cef5722c7f5b 100644
--- a/osfmk/vm/vm_kern.c
+++ b/osfmk/vm/vm_kern.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
 #include <vm/cpm.h>
 
 #include <string.h>
+
+#include <libkern/OSDebug.h>
+#include <sys/kdebug.h>
+
 /*
  *     Variables exported by this module.
  */
@@ -84,6 +88,8 @@
 vm_map_t       kernel_map;
 vm_map_t       kernel_pageable_map;
 
+extern boolean_t vm_kernel_ready;
+
 /*
  * Forward declarations for internal functions.
  */
@@ -105,6 +111,7 @@ kmem_alloc_contig(
        vm_offset_t             *addrp,
        vm_size_t               size,
        vm_offset_t             mask,
+       ppnum_t                 max_pnum,
        int                     flags)
 {
        vm_object_t             object;
@@ -139,7 +146,7 @@ kmem_alloc_contig(
                object = vm_object_allocate(map_size);
        }
 
-       kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
+       kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
        if (KERN_SUCCESS != kr) {
                vm_object_deallocate(object);
                return kr;
@@ -153,7 +160,7 @@ kmem_alloc_contig(
        vm_object_reference(object);
        vm_map_unlock(map);
 
-       kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, FALSE);
+       kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum, FALSE);
 
        if (kr != KERN_SUCCESS) {
                vm_map_remove(map, vm_map_trunc_page(map_addr),
@@ -167,6 +174,7 @@ kmem_alloc_contig(
        for (i = 0; i < map_size; i += PAGE_SIZE) {
                m = pages;
                pages = NEXT_PAGE(m);
+               *(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
                m->busy = FALSE;
                vm_page_insert(m, object, offset + i);
        }
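The hunks above widen kmem_alloc_contig() to take a physical page-number ceiling (max_pnum) and hand it straight through to cpm_allocate(). As a hedged illustration of what that enables, the sketch below shows a caller asking for contiguous wired memory whose pages all sit below 4 GB. The prototype is inferred from the parameter list in this diff; kernel_map, the 64 KB size, the zero alignment mask, and the atop() ceiling are illustrative assumptions, not taken from the source.

/* Illustrative only: bound every physical page of the allocation below 4 GB. */
vm_offset_t	cbuf;
kern_return_t	kr;

kr = kmem_alloc_contig(kernel_map, &cbuf, 64 * 1024,
		       (vm_offset_t) 0,			/* no extra alignment */
		       (ppnum_t) atop(0xFFFFFFFFULL),	/* highest allowed page */
		       0);
if (kr != KERN_SUCCESS) {
	/* no contiguous low memory right now; fall back to a bounce buffer */
}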
@@ -205,6 +213,10 @@ kmem_alloc_contig(
  *               KMA_HERE              *addrp is base address, else "anywhere"
  *               KMA_NOPAGEWAIT        don't wait for pages if unavailable
  *               KMA_KOBJECT           use kernel_object
+ *               KMA_LOMEM             support for 32 bit devices in a 64 bit world
+ *                                     if set and a lomemory pool is available
+ *                                     grab pages from it... this also implies
+ *                                     KMA_NOPAGEWAIT
  */
 
 kern_return_t
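To make the new KMA_LOMEM contract documented above concrete, here is a hedged caller sketch: per the argument check added further down in this function, a low-memory request must also pass KMA_NOPAGEWAIT, and the caller has to be prepared for KERN_RESOURCE_SHORTAGE rather than blocking for pages. Everything except the flag names and the allocator itself is illustrative.

vm_offset_t	addr;
kern_return_t	kr;

/* KMA_LOMEM without KMA_NOPAGEWAIT is rejected with KERN_INVALID_ARGUMENT */
kr = kernel_memory_allocate(kernel_map, &addr, PAGE_SIZE, 0,
			    KMA_LOMEM | KMA_NOPAGEWAIT);
if (kr == KERN_RESOURCE_SHORTAGE) {
	/* the low-memory pool is empty at the moment; retry or degrade */
}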
@@ -218,19 +230,69 @@ kernel_memory_allocate(
        vm_object_t             object;
        vm_object_offset_t      offset;
        vm_map_entry_t          entry;
-       vm_map_offset_t         map_addr;
+       vm_map_offset_t         map_addr, fill_start;
        vm_map_offset_t         map_mask;
-       vm_map_size_t           map_size;
+       vm_map_size_t           map_size, fill_size;
        vm_map_size_t           i;
        kern_return_t           kr;
+       vm_page_t               mem;
+       int                     vm_alloc_flags;
+
+       if (! vm_kernel_ready) {
+               panic("kernel_memory_allocate: VM is not ready");
+       }
 
        if (size == 0) {
                *addrp = 0;
                return KERN_INVALID_ARGUMENT;
        }
+       if (flags & KMA_LOMEM) {
+               if ( !(flags & KMA_NOPAGEWAIT) ) {
+                       *addrp = 0;
+                       return KERN_INVALID_ARGUMENT;
+               }
+       }
 
        map_size = vm_map_round_page(size);
        map_mask = (vm_map_offset_t) mask;
+       vm_alloc_flags = 0;
+
+       /*
+        * Guard pages:
+        *
+        * Guard pages are implemented as fictitious pages.  By placing guard pages
+        * on either end of a stack, they can help detect cases where a thread walks
+        * off either end of its stack.  They are allocated and set up here and attempts
+        * to access those pages are trapped in vm_fault_page().
+        *
+        * The map_size we were passed may include extra space for
+        * guard pages.  If those were requested, then back it out of fill_size
+        * since vm_map_find_space() takes just the actual size not including
+        * guard pages.  Similarly, fill_start indicates where the actual pages
+        * will begin in the range.
+        */
+
+       fill_start = 0;
+       fill_size = map_size;
+       if (flags & KMA_GUARD_FIRST) {
+               vm_alloc_flags |= VM_FLAGS_GUARD_BEFORE;
+               fill_start += PAGE_SIZE_64;
+               fill_size -= PAGE_SIZE_64;
+               if (map_size < fill_start + fill_size) {
+                       /* no space for a guard page */
+                       *addrp = 0;
+                       return KERN_INVALID_ARGUMENT;
+               }
+       }
+       if (flags & KMA_GUARD_LAST) {
+               vm_alloc_flags |= VM_FLAGS_GUARD_AFTER;
+               fill_size -= PAGE_SIZE_64;
+               if (map_size <= fill_start + fill_size) {
+                       /* no space for a guard page */
+                       *addrp = 0;
+                       return KERN_INVALID_ARGUMENT;
+               }
+       }
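A hedged sketch of how the fill_start/fill_size bookkeeping above is meant to be driven from the caller's side: the requested size already includes the guard pages, and, assuming the returned address covers the leading guard page (as the fill_start arithmetic suggests), the usable range begins one page in. The names and the 4-page size are illustrative.

vm_offset_t	base;
vm_size_t	usable = 4 * PAGE_SIZE;
kern_return_t	kr;

/* the size passed in includes one guard page at each end */
kr = kernel_memory_allocate(kernel_map, &base,
			    usable + 2 * PAGE_SIZE, 0,
			    KMA_KOBJECT | KMA_GUARD_FIRST | KMA_GUARD_LAST);
if (kr == KERN_SUCCESS) {
	/* wired memory runs from base + PAGE_SIZE for `usable` bytes;
	   touching the page on either side traps in vm_fault_page() */
}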
 
        /*
         *      Allocate a new object (if necessary).  We must do this before
@@ -243,7 +305,9 @@ kernel_memory_allocate(
                object = vm_object_allocate(map_size);
        }
 
-       kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
+       kr = vm_map_find_space(map, &map_addr,
+                              fill_size, map_mask,
+                              vm_alloc_flags, &entry);
        if (KERN_SUCCESS != kr) {
                vm_object_deallocate(object);
                return kr;
@@ -257,38 +321,85 @@ kernel_memory_allocate(
        vm_map_unlock(map);
 
        vm_object_lock(object);
-       for (i = 0; i < map_size; i += PAGE_SIZE) {
-               vm_page_t       mem;
 
-               while (VM_PAGE_NULL == 
-                      (mem = vm_page_alloc(object, offset + i))) {
+       /*
+        * Allocate the lower guard page if one was requested.  The guard
+        * page extends up to fill_start which is where the real memory
+        * begins.
+        */
+
+       for (i = 0; i < fill_start; i += PAGE_SIZE) {
+               for (;;) {
+                       mem = vm_page_alloc_guard(object, offset + i);
+                       if (mem != VM_PAGE_NULL)
+                               break;
                        if (flags & KMA_NOPAGEWAIT) {
-                               if (object == kernel_object)
-                                       vm_object_page_remove(object, offset, offset + i);
-                               vm_object_unlock(object);
-                               vm_map_remove(map, map_addr, map_addr + map_size, 0);
-                               vm_object_deallocate(object);
-                               return KERN_RESOURCE_SHORTAGE;
+                               kr = KERN_RESOURCE_SHORTAGE;
+                               goto nopage;
                        }
                        vm_object_unlock(object);
-                       VM_PAGE_WAIT();
+                       vm_page_more_fictitious();
                        vm_object_lock(object);
                }
                mem->busy = FALSE;
        }
-       vm_object_unlock(object);
 
-       if ((kr = vm_map_wire(map, map_addr, map_addr + map_size, VM_PROT_DEFAULT, FALSE)) 
-               != KERN_SUCCESS) {
-               if (object == kernel_object) {
+       /*
+        * Allocate the real memory here.  This extends from offset fill_start
+        * for fill_size bytes.
+        */
+
+       for (i = fill_start; i < fill_start + fill_size; i += PAGE_SIZE) {
+               for (;;) {
+                       if (flags & KMA_LOMEM)
+                               mem = vm_page_alloclo(object, offset + i);
+                       else
+                               mem = vm_page_alloc(object, offset + i);
+
+                       if (mem != VM_PAGE_NULL)
+                               break;
+
+                       if (flags & KMA_NOPAGEWAIT) {
+                               kr = KERN_RESOURCE_SHORTAGE;
+                               goto nopage;
+                       }
+                       vm_object_unlock(object);
+                       VM_PAGE_WAIT();
                        vm_object_lock(object);
-                       vm_object_page_remove(object, offset, offset + map_size);
+               }
+               mem->busy = FALSE;
+       }
+
+       /*
+        * Lastly, allocate the ending guard page if requested.  This starts at the ending
+        * address from the loop above up to the map_size that was originally
+        * requested.
+        */
+
+       for (i = fill_start + fill_size; i < map_size; i += PAGE_SIZE) {
+               for (;;) {
+                       mem = vm_page_alloc_guard(object, offset + i);
+                       if (mem != VM_PAGE_NULL)
+                               break;
+                       if (flags & KMA_NOPAGEWAIT) {
+                               kr = KERN_RESOURCE_SHORTAGE;
+                               goto nopage;
+                       }
                        vm_object_unlock(object);
+                       vm_page_more_fictitious();
+                       vm_object_lock(object);
                }
-               vm_map_remove(map, map_addr, map_addr + map_size, 0);
-               vm_object_deallocate(object);
-               return (kr);
+               mem->busy = FALSE;
+       }
+       vm_object_unlock(object);
+
+       kr = vm_map_wire(map, map_addr, map_addr + map_size,
+                        VM_PROT_DEFAULT, FALSE);
+       if (kr != KERN_SUCCESS) {
+               vm_object_lock(object);
+               goto nopage;
        }
+
        /* now that the page is wired, we no longer have to fear coalesce */
        vm_object_deallocate(object);
        if (object == kernel_object)
@@ -299,6 +410,14 @@ kernel_memory_allocate(
         */
        *addrp = CAST_DOWN(vm_offset_t, map_addr);
        return KERN_SUCCESS;
+
+nopage:
+       if (object == kernel_object)
+               vm_object_page_remove(object, offset, offset + i);
+       vm_object_unlock(object);
+       vm_map_remove(map, map_addr, map_addr + map_size, 0);
+       vm_object_deallocate(object);
+       return KERN_RESOURCE_SHORTAGE;
 }
 
 /*
@@ -314,7 +433,9 @@ kmem_alloc(
        vm_offset_t     *addrp,
        vm_size_t       size)
 {
-       return kernel_memory_allocate(map, addrp, size, 0, 0);
+       kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, 0);
+       TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp);
+       return kr;
 }
 
 /*
@@ -389,7 +510,7 @@ kmem_realloc(
         */
 
        kr = vm_map_find_space(map, &newmapaddr, newmapsize,
-                              (vm_map_offset_t) 0, &newentry);
+                              (vm_map_offset_t) 0, 0, &newentry);
        if (kr != KERN_SUCCESS) {
                vm_object_lock(object);
                for(offset = oldmapsize; 
@@ -526,6 +647,8 @@ kmem_free(
 {
        kern_return_t kr;
 
+       TRACE_MACHLEAKS(KMEM_FREE_CODE, KMEM_FREE_CODE_2, size, addr);
+
        kr = vm_map_remove(map, vm_map_trunc_page(addr),
                                vm_map_round_page(addr + size), 
                                VM_MAP_REMOVE_KUNWIRE);
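For context on the tracing hooks added above, a hedged sketch of the call pair they now bracket: kmem_alloc() logs the allocation and kmem_free() logs the release, so the Mach leak-tracing facility can see both ends of the allocation's lifetime. The buffer name and size are illustrative.

vm_offset_t	buf;
vm_size_t	buf_size = 16 * 1024;

if (kmem_alloc(kernel_map, &buf, buf_size) == KERN_SUCCESS) {
	/* ... use the wired buffer; the allocation was traced above ... */
	kmem_free(kernel_map, buf, buf_size);	/* traced on release as well */
}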
@@ -608,7 +731,7 @@ kmem_remap_pages(
            /*
             *  Wire it down (again)
             */
-           vm_page_lock_queues();
+           vm_page_lockspin_queues();
            vm_page_wire(mem);
            vm_page_unlock_queues();
            vm_object_unlock(object);
@@ -723,27 +846,26 @@ kmem_init(
        map_end = vm_map_round_page(end);
 
        kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_ADDRESS,
-                                  map_end, FALSE);
-
+                           map_end, FALSE);
        /*
         *      Reserve virtual memory allocated up to this time.
         */
-
        if (start != VM_MIN_KERNEL_ADDRESS) {
                vm_map_offset_t map_addr;
-
                map_addr = VM_MIN_KERNEL_ADDRESS;
                (void) vm_map_enter(kernel_map,
-                                   &map_addr, 
-                                   (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
-                                   (vm_map_offset_t) 0,
-                                   VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
-                                   VM_OBJECT_NULL, 
-                                   (vm_object_offset_t) 0, FALSE,
-                                   VM_PROT_DEFAULT, VM_PROT_ALL,
-                                   VM_INHERIT_DEFAULT);
+                           &map_addr, 
+                           (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
+                           (vm_map_offset_t) 0,
+                           VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
+                           VM_OBJECT_NULL, 
+                           (vm_object_offset_t) 0, FALSE,
+                           VM_PROT_NONE, VM_PROT_NONE,
+                           VM_INHERIT_DEFAULT);
        }
 
+
         /*
          * Account for kernel memory (text, data, bss, vm shenanigans).
          * This may include inaccessible "holes" as determined by what
@@ -752,6 +874,23 @@ kmem_init(
         vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
                                                 + vm_page_active_count
                                                 + vm_page_inactive_count));
+
+       /*
+        * Set the default global user wire limit which limits the amount of
+        * memory that can be locked via mlock().  We set this to the total number of
+        * pages that are potentially usable by a user app (max_mem) minus
+        * 1000 pages.  This keeps 4MB in reserve for the kernel which will hopefully be
+        * enough to avoid memory deadlocks. If for some reason the system has less than
+        * 2000 pages of memory at this point, then we'll allow users to lock up to 80%
+        * of that.  This can be overridden via a sysctl.
+        */
+
+       if (max_mem > 2000)
+               vm_global_user_wire_limit = max_mem - 1000;
+       else
+               vm_global_user_wire_limit = max_mem * 100 / 80;
+       
+       vm_user_wire_limit = vm_global_user_wire_limit;         /* the default per user limit is the same as the global limit */
 }