diff --git a/osfmk/vm/vm_resident.c b/osfmk/vm/vm_resident.c
index 2ba881ad4a7cd55153f926729a5fa5749b908fbd..8d07635f4b2aea21278f25a8188e2a0d8beb120d 100644
--- a/osfmk/vm/vm_resident.c
+++ b/osfmk/vm/vm_resident.c
@@ -1,23 +1,31 @@
 /*
  * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- * 
- * @APPLE_LICENSE_HEADER_END@
+ * This file contains Original Code and/or Modifications of Original Code 
+ * as defined in and that are subject to the Apple Public Source License 
+ * Version 2.0 (the 'License'). You may not use this file except in 
+ * compliance with the License.  The rights granted to you under the 
+ * License may not be used to create, or enable the creation or 
+ * redistribution of, unlawful or unlicensed copies of an Apple operating 
+ * system, or to circumvent, violate, or enable the circumvention or 
+ * violation of, any terms of an Apple operating system software license 
+ * agreement.
+ *
+ * Please obtain a copy of the License at 
+ * http://www.opensource.apple.com/apsl/ and read it before using this 
+ * file.
+ *
+ * The Original Code and all software distributed under the License are 
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
+ * Please see the License for the specific language governing rights and 
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -244,12 +252,6 @@ unsigned int       vm_page_gobble_count_warning = 0;
 unsigned int   vm_page_purgeable_count = 0; /* # of pages purgeable now */
 uint64_t       vm_page_purged_count = 0;    /* total count of purged pages */
 
-ppnum_t                vm_lopage_poolstart = 0;
-ppnum_t                vm_lopage_poolend = 0;
-int            vm_lopage_poolsize = 0;
-uint64_t       max_valid_dma_address = 0xffffffffffffffffULL;
-
-
 /*
  *     Several page replacement parameters are also
  *     shared with this module, so that page allocation
@@ -555,8 +557,6 @@ pmap_startup(
        vm_page_t       pages;
        ppnum_t         phys_page;
        addr64_t        tmpaddr;
-       unsigned int    num_of_lopages = 0;
-       unsigned int    last_index;
 
        /*
         *      We calculate how many page frames we will have
@@ -572,6 +572,7 @@ pmap_startup(
        /*
         *      Initialize the page frames.
         */
+
        for (i = 0, pages_initialized = 0; i < npages; i++) {
                if (!pmap_next_page(&phys_page))
                        break;
@@ -581,65 +582,21 @@ pmap_startup(
                pages_initialized++;
        }
 
-       /*
-        * Check if we want to initialize pages to a known value
-        */
-       fill = 0;                                                               /* Assume no fill */
-       if (PE_parse_boot_arg("fill", &fillval)) fill = 1;                      /* Set fill */
-
-       /*
-        * if vm_lopage_poolsize is non-zero, then we need to reserve
-        * a pool of pages whose addresses are less than 4G... this pool
-        * is used by drivers whose hardware can't DMA beyond 32 bits...
-        *
-        * note that I'm assuming that the page list is ascending and
-        * ordered with respect to the physical address
-        */
-       for (i = 0, num_of_lopages = vm_lopage_poolsize; num_of_lopages && i < pages_initialized; num_of_lopages--, i++) {
-               vm_page_t m;
-
-               m = &pages[i];
-
-               if (m->phys_page >= (1 << (32 - PAGE_SHIFT)))
-                       panic("couldn't reserve the lopage pool: not enough lo pages\n");
-
-               if (m->phys_page < vm_lopage_poolend)
-                       panic("couldn't reserve the lopage pool: page list out of order\n");
-
-               vm_lopage_poolend = m->phys_page;
-
-               if (vm_lopage_poolstart == 0)
-                       vm_lopage_poolstart = m->phys_page;
-               else {
-                       if (m->phys_page < vm_lopage_poolstart)
-                               panic("couldn't reserve the lopage pool: page list out of order\n");
-               }
-
-               if (fill)
-                       fillPage(m->phys_page, fillval);                /* Fill the page with a known value if requested at boot */
-
-               vm_page_release(m);
-       } 
-       last_index = i;
-
-       // -debug code remove
-       if (2 == vm_himemory_mode) {
-               // free low -> high so high is preferred
-               for (i = last_index + 1; i <= pages_initialized; i++) {
-                       if(fill) fillPage(pages[i - 1].phys_page, fillval);             /* Fill the page with a known value if requested at boot */
-                       vm_page_release(&pages[i - 1]);
-               }
-       }
-       else
-       // debug code remove-
-
        /*
         * Release pages in reverse order so that physical pages
         * initially get allocated in ascending addresses. This keeps
         * the devices (which must address physical memory) happy if
         * they require several consecutive pages.
         */
-       for (i = pages_initialized; i > last_index; i--) {
+
+       /*
+        *      Check if we want to initialize pages to a known value
+        */
+
+       fill = 0;                                                               /* Assume no fill */
+       if (PE_parse_boot_arg("fill", &fillval)) fill = 1;                      /* Set fill */
+
+       for (i = pages_initialized; i > 0; i--) {
                if(fill) fillPage(pages[i - 1].phys_page, fillval);             /* Fill the page with a known value if requested at boot */
                vm_page_release(&pages[i - 1]);
        }
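
The reverse-order loop above works because the free list is last-in, first-out: releasing frames from the highest index down leaves the lowest physical addresses at the head, so early allocations come back in ascending order. A minimal user-space sketch of that property; frame_t, frame_push(), and frame_pop() are illustrative stand-ins, not xnu names:

#include <stdio.h>
#include <stddef.h>

typedef struct frame {
	unsigned	 phys_page;	/* physical page number */
	struct frame	*next;		/* free-list linkage */
} frame_t;

static frame_t *free_head = NULL;

static void
frame_push(frame_t *f)			/* cf. vm_page_release() */
{
	f->next = free_head;
	free_head = f;
}

static frame_t *
frame_pop(void)				/* cf. vm_page_grab() */
{
	frame_t *f = free_head;

	if (f != NULL)
		free_head = f->next;
	return f;
}

int
main(void)
{
	frame_t pages[4] = { {10, NULL}, {11, NULL}, {12, NULL}, {13, NULL} };
	frame_t *f;
	int i;

	/* Release in reverse order, as the loop above does... */
	for (i = 4; i > 0; i--)
		frame_push(&pages[i - 1]);

	/* ...so frames pop off in ascending order: 10 11 12 13. */
	while ((f = frame_pop()) != NULL)
		printf("%u\n", f->phys_page);
	return 0;
}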
@@ -857,8 +814,7 @@ vm_page_replace(
        register vm_object_t            object,
        register vm_object_offset_t     offset)
 {
-       vm_page_bucket_t *bucket;
-       vm_page_t        found_m = VM_PAGE_NULL;
+       register vm_page_bucket_t *bucket;
 
        VM_PAGE_CHECK(mem);
 #if DEBUG
@@ -884,60 +840,46 @@ vm_page_replace(
 
        bucket = &vm_page_buckets[vm_page_hash(object, offset)];
        simple_lock(&vm_page_bucket_lock);
-
        if (bucket->pages) {
                vm_page_t *mp = &bucket->pages;
                register vm_page_t m = *mp;
-
                do {
                        if (m->object == object && m->offset == offset) {
                                /*
-                                * Remove old page from hash list
+                                * Remove page from bucket and from object,
+                                * and return it to the free list.
                                 */
                                *mp = m->next;
+                               VM_PAGE_REMOVE(m);
+                               m->tabled = FALSE;
+                               m->object = VM_OBJECT_NULL;
+                               m->offset = (vm_object_offset_t) -1;
+                               object->resident_page_count--;
+
+                               if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
+                                   object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
+                                       assert(vm_page_purgeable_count > 0);
+                                       vm_page_purgeable_count--;
+                               }
+                                       
+                               /*
+                                * Return page to the free list.
+                                * Note the page is not tabled now, so this
+                                * won't self-deadlock on the bucket lock.
+                                */
 
-                               found_m = m;
+                               vm_page_free(m);
                                break;
                        }
                        mp = &m->next;
                } while ((m = *mp));
-
                mem->next = bucket->pages;
        } else {
                mem->next = VM_PAGE_NULL;
        }
-       /*
-        * insert new page at head of hash list
-        */
        bucket->pages = mem;
-
        simple_unlock(&vm_page_bucket_lock);
 
-       if (found_m) {
-               /*
-                * there was already a page at the specified
-                * offset for this object... remove it from
-                * the object and free it back to the free list
-                */
-               VM_PAGE_REMOVE(found_m);
-               found_m->tabled = FALSE;
-
-               found_m->object = VM_OBJECT_NULL;
-               found_m->offset = (vm_object_offset_t) -1;
-               object->resident_page_count--;
-
-               if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
-                   object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
-                       assert(vm_page_purgeable_count > 0);
-                       vm_page_purgeable_count--;
-               }
-                                       
-               /*
-                * Return page to the free list.
-                * Note the page is not tabled now
-                */
-               vm_page_free(found_m);
-       }
        /*
         *      Now link into the object's list of backed pages.
         */
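
The replacement logic above walks the hash chain with a pointer-to-pointer cursor, so the matching page can be unlinked from the singly linked bucket without carrying a separate predecessor pointer. A hedged sketch of that idiom in isolation, with illustrative names (node_t, bucket_remove):

#include <stddef.h>

typedef struct node {
	int		 key;
	struct node	*next;
} node_t;

/* Unlink and return the first node whose key matches, or NULL. */
static node_t *
bucket_remove(node_t **head, int key)
{
	node_t **mp = head;	/* always points at the link to rewrite */
	node_t  *m;

	while ((m = *mp) != NULL) {
		if (m->key == key) {
			*mp = m->next;	/* splice the match out of the chain */
			m->next = NULL;
			return m;
		}
		mp = &m->next;
	}
	return NULL;
}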
@@ -1100,19 +1042,7 @@ vm_page_lookup(
 
        bucket = &vm_page_buckets[vm_page_hash(object, offset)];
 
-        /*
-         * since we hold the object lock, we are guaranteed that no
-         * new pages can be inserted into this object... this in turn
-         * guarantees that the page we're looking for can't exist
-         * if the bucket it hashes to is currently NULL even when looked
-         * at outside the scope of the hash bucket lock... this is a
-         * really cheap optimization to avoid taking the lock
-         */
-        if (bucket->pages == VM_PAGE_NULL) {
-                return (VM_PAGE_NULL);
-        }
        simple_lock(&vm_page_bucket_lock);
-
        for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
                VM_PAGE_CHECK(mem);
                if ((mem->object == object) && (mem->offset == offset))
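
The fast path deleted above deserves a second look: with the object lock held, no page for this (object, offset) can be inserted concurrently, so reading a NULL bucket head without the bucket lock is a safe miss. A minimal sketch of the pattern, assuming pthreads in place of the kernel's simple lock and illustrative types (page_t, bucket_t):

#include <pthread.h>
#include <stddef.h>

typedef struct page {
	const void	*object;
	long long	 offset;
	struct page	*next;
} page_t;

typedef struct {
	page_t	*pages;			/* head of this bucket's hash chain */
} bucket_t;

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

static page_t *
page_lookup(bucket_t *bucket, const void *object, long long offset)
{
	page_t *mem;

	/*
	 * Caller holds the object lock, so no page for this object can
	 * be inserted while we look; a NULL head means a guaranteed miss
	 * and lets us skip the bucket lock entirely.
	 */
	if (bucket->pages == NULL)
		return NULL;

	pthread_mutex_lock(&bucket_lock);
	for (mem = bucket->pages; mem != NULL; mem = mem->next)
		if (mem->object == object && mem->offset == offset)
			break;
	pthread_mutex_unlock(&bucket_lock);
	return mem;
}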
@@ -1414,55 +1344,6 @@ vm_pool_low(void)
        return( vm_page_free_count < vm_page_free_reserved );
 }
 
-
-
-/*
- * this is an interface to support bring-up of drivers
- * on platforms with physical memory > 4G...
- */
-int            vm_himemory_mode = 0;
-
-
-/*
- * this interface exists to support hardware controllers
- * incapable of generating DMAs with more than 32 bits
- * of address on platforms with physical memory > 4G...
- */
-unsigned int   vm_lopage_free_count = 0;
-unsigned int   vm_lopage_max_count = 0;
-vm_page_t      vm_lopage_queue_free = VM_PAGE_NULL;
-
-vm_page_t
-vm_page_grablo(void)
-{
-       register vm_page_t      mem;
-       unsigned int vm_lopage_alloc_count;
-
-       if (vm_lopage_poolsize == 0)
-               return (vm_page_grab());
-
-       mutex_lock(&vm_page_queue_free_lock);
-
-       if ((mem = vm_lopage_queue_free) != VM_PAGE_NULL) {
-
-               vm_lopage_queue_free = (vm_page_t) mem->pageq.next;
-               mem->pageq.next = NULL;
-               mem->pageq.prev = NULL;
-               mem->free = FALSE;
-               mem->no_isync = TRUE;
-
-               vm_lopage_free_count--;
-               vm_lopage_alloc_count = (vm_lopage_poolend - vm_lopage_poolstart) - vm_lopage_free_count;
-               if (vm_lopage_alloc_count > vm_lopage_max_count)
-                       vm_lopage_max_count = vm_lopage_alloc_count;
-       }
-       mutex_unlock(&vm_page_queue_free_lock);
-
-       return (mem);
-}
-
-
-
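
The removed vm_page_grablo() (together with the release-side branch deleted further down) implements a simple segregation scheme: frames below 4G live on their own free list so controllers limited to 32-bit DMA addresses can be served, with a fall-back to the ordinary allocator when no pool is configured. A hedged sketch of that shape; the names and the fixed 4G cutoff are illustrative, not the xnu bookkeeping:

#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define LOPAGE_LIMIT	(1ULL << (32 - PAGE_SHIFT))	/* first PFN at or above 4G */

typedef struct page {
	uint64_t	 pfn;		/* physical frame number */
	struct page	*next;
} page_t;

static page_t	*lo_free_head;	/* frames with pfn < LOPAGE_LIMIT */
static page_t	*free_head;	/* everything else */

static void
page_release(page_t *m)
{
	/* Route sub-4G frames to the dedicated pool on free. */
	page_t **head = (m->pfn < LOPAGE_LIMIT) ? &lo_free_head : &free_head;

	m->next = *head;
	*head = m;
}

static page_t *
page_grablo(void)
{
	page_t *m = lo_free_head;

	if (m != NULL)
		lo_free_head = m->next;
	return m;	/* NULL tells the caller to fall back to the generic grab */
}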
 /*
  *     vm_page_grab:
  *
@@ -1588,46 +1469,36 @@ vm_page_release(
        assert(mem->object == VM_OBJECT_NULL);
        assert(mem->pageq.next == NULL &&
               mem->pageq.prev == NULL);
+       mem->pageq.next = (queue_entry_t) vm_page_queue_free;
+       vm_page_queue_free = mem;
+       vm_page_free_count++;
 
-       if (mem->phys_page <= vm_lopage_poolend && mem->phys_page >= vm_lopage_poolstart) {
-               /*
-                * this exists to support hardware controllers
-                * incapable of generating DMAs with more than 32 bits
-                * of address on platforms with physical memory > 4G...
-                */
-               mem->pageq.next = (queue_entry_t) vm_lopage_queue_free;
-               vm_lopage_queue_free = mem;
-               vm_lopage_free_count++;
-       } else {          
-               mem->pageq.next = (queue_entry_t) vm_page_queue_free;
-               vm_page_queue_free = mem;
-               vm_page_free_count++;
-               /*
-                *      Check if we should wake up someone waiting for a page.
-                *      But don't bother waking them unless they can allocate.
-                *
-                *      We wakeup only one thread, to prevent starvation.
-                *      Because the scheduling system handles wait queues FIFO,
-                *      if we wakeup all waiting threads, one greedy thread
-                *      can starve multiple niceguy threads.  When the threads
-                *      all wakeup, the greedy thread runs first, grabs the page,
-                *      and waits for another page.  It will be the first to run
-                *      when the next page is freed.
-                *
-                *      However, there is a slight danger here.
-                *      The thread we wake might not use the free page.
-                *      Then the other threads could wait indefinitely
-                *      while the page goes unused.  To forestall this,
-                *      the pageout daemon will keep making free pages
-                *      as long as vm_page_free_wanted is non-zero.
-                */
+       /*
+        *      Check if we should wake up someone waiting for a page.
+        *      But don't bother waking them unless they can allocate.
+        *
+        *      We wakeup only one thread, to prevent starvation.
+        *      Because the scheduling system handles wait queues FIFO,
+        *      if we wakeup all waiting threads, one greedy thread
+        *      can starve multiple niceguy threads.  When the threads
+        *      all wakeup, the greedy thread runs first, grabs the page,
+        *      and waits for another page.  It will be the first to run
+        *      when the next page is freed.
+        *
+        *      However, there is a slight danger here.
+        *      The thread we wake might not use the free page.
+        *      Then the other threads could wait indefinitely
+        *      while the page goes unused.  To forestall this,
+        *      the pageout daemon will keep making free pages
+        *      as long as vm_page_free_wanted is non-zero.
+        */
 
-               if ((vm_page_free_wanted > 0) &&
-                   (vm_page_free_count >= vm_page_free_reserved)) {
-                       vm_page_free_wanted--;
-                       thread_wakeup_one((event_t) &vm_page_free_count);
-               }
+       if ((vm_page_free_wanted > 0) &&
+           (vm_page_free_count >= vm_page_free_reserved)) {
+               vm_page_free_wanted--;
+               thread_wakeup_one((event_t) &vm_page_free_count);
        }
+
        mutex_unlock(&vm_page_queue_free_lock);
 }
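
The starvation argument in the comment above maps directly onto condition-variable discipline: signal exactly one waiter, and only when the free level is high enough for it to allocate; a broadcast would let one greedy thread beat every other waiter to each freed page. A sketch using POSIX primitives in place of the Mach thread_wakeup_one(); the names are illustrative:

#include <pthread.h>

static pthread_mutex_t	free_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	free_cv   = PTHREAD_COND_INITIALIZER;
static unsigned		free_count, free_reserved, free_wanted;

static void
page_freed(void)
{
	pthread_mutex_lock(&free_lock);
	free_count++;
	/*
	 * Wake exactly one waiter (cf. thread_wakeup_one), and only if
	 * it can actually allocate; per the comment above, the pageout
	 * daemon keeps producing pages while free_wanted stays non-zero,
	 * covering the case where the woken thread never uses the page.
	 */
	if (free_wanted > 0 && free_count >= free_reserved) {
		free_wanted--;
		pthread_cond_signal(&free_cv);
	}
	pthread_mutex_unlock(&free_lock);
}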
 
@@ -1705,27 +1576,6 @@ vm_page_alloc(
        return(mem);
 }
 
-
-vm_page_t
-vm_page_alloclo(
-       vm_object_t             object,
-       vm_object_offset_t      offset)
-{
-       register vm_page_t      mem;
-
-#if DEBUG
-       _mutex_assert(&object->Lock, MA_OWNED);
-#endif
-       mem = vm_page_grablo();
-       if (mem == VM_PAGE_NULL)
-               return VM_PAGE_NULL;
-
-       vm_page_insert(mem, object, offset);
-
-       return(mem);
-}
-
-
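
For contrast with vm_page_alloc() above it, the deleted vm_page_alloclo() was only the lo-pool grab composed with an insertion into the object at the given offset, under the caller's object lock. A hedged sketch of that composition, reusing the illustrative page_t and page_grablo() from the earlier sketch and assuming a page_insert() stand-in:

typedef struct object object_t;			/* opaque stand-in for vm_object_t */
extern void page_insert(page_t *m, object_t *object, uint64_t offset);

static page_t *
page_alloc_lo(object_t *object, uint64_t offset)
{
	page_t *mem;

	/* The real code asserts that the object lock is held here. */
	mem = page_grablo();
	if (mem == NULL)
		return NULL;
	page_insert(mem, object, offset);	/* table it at (object, offset) */
	return mem;
}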
 counter(unsigned int c_laundry_pages_freed = 0;)
 
 int vm_pagein_cluster_unused = 0;