git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/vm/vm_pageout.c
xnu-792.24.17.tar.gz
index fdc811c8177adcad370602ba765c78e46686fba8..652d41a9d4e6a419dc68364a276d8514f095169e 100644 (file)
@@ -1,29 +1,23 @@
 /*
  * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ * @APPLE_LICENSE_HEADER_START@
  * 
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the License
- * may not be used to create, or enable the creation or redistribution of,
- * unlawful or unlicensed copies of an Apple operating system, or to
- * circumvent, violate, or enable the circumvention or violation of, any
- * terms of an Apple operating system software license agreement.
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License").  You may not use this file except in compliance with the
+ * License.  Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
  * 
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
  * 
- * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ * @APPLE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  */
 /*
  * ENCRYPTED SWAP:
  */
+#ifdef __ppc__
+#include <ppc/mappings.h>
+#endif /* __ppc__ */
 #include <../bsd/crypto/aes/aes.h>
 
 extern ipc_port_t      memory_manager_default;
@@ -1066,6 +1063,9 @@ struct flow_control {
         mach_timespec_t        ts;
 };
 
+extern kern_return_t   sysclk_gettime(mach_timespec_t *);
+
+
 void
 vm_pageout_scan(void)
 {
@@ -1351,9 +1351,7 @@ done_with_activepage:
 reset_deadlock_timer:
                                ts.tv_sec = vm_pageout_deadlock_wait / 1000;
                                ts.tv_nsec = (vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
-                               clock_get_system_nanotime(
-                                       &flow_control.ts.tv_sec,
-                                       (uint32_t *) &flow_control.ts.tv_nsec);
+                               sysclk_gettime(&flow_control.ts);
                                ADD_MACH_TIMESPEC(&flow_control.ts, &ts);
                                
                                flow_control.state = FCS_DELAYED;
@@ -1362,9 +1360,7 @@ reset_deadlock_timer:
                                break;
                                        
                        case FCS_DELAYED:
-                               clock_get_system_nanotime(
-                                       &ts.tv_sec,
-                                       (uint32_t *) &ts.tv_nsec);
+                               sysclk_gettime(&ts);
 
                                if (CMP_MACH_TIMESPEC(&ts, &flow_control.ts) >= 0) {
                                        /*
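
Both hunks above replace clock_get_system_nanotime() with the sysclk_gettime() routine declared extern earlier in this diff; the deadlock-timer pattern itself is unchanged on either side: convert vm_pageout_deadlock_wait (milliseconds) into a mach_timespec_t, add it to the current time to form a deadline, then compare the clock against that deadline when the state machine re-enters FCS_DELAYED. A minimal standalone sketch of the pattern; the two helper names are illustrative only, while the macros are the real ones from <mach/clock_types.h>:

#include <mach/boolean.h>
#include <mach/clock_types.h>

static mach_timespec_t vm_deadlock_deadline;    /* illustrative name */

static void
arm_deadlock_timer(unsigned int wait_msecs, mach_timespec_t now)
{
	mach_timespec_t ts;

	ts.tv_sec  = wait_msecs / 1000;                           /* whole seconds    */
	ts.tv_nsec = (wait_msecs % 1000) * 1000 * NSEC_PER_USEC;  /* ms -> usec -> ns */

	vm_deadlock_deadline = now;
	ADD_MACH_TIMESPEC(&vm_deadlock_deadline, &ts);  /* deadline = now + wait */
}

static boolean_t
deadlock_timer_expired(mach_timespec_t now)
{
	/* CMP_MACH_TIMESPEC() is positive when its first operand is later */
	return CMP_MACH_TIMESPEC(&now, &vm_deadlock_deadline) >= 0;
}
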
@@ -1937,9 +1933,7 @@ vm_pageout_iothread_continue(struct vm_pageout_queue *q)
                            */
 
                           if (!object->pager_initialized)
-                                  vm_object_collapse(object,
-                                                     (vm_object_offset_t) 0,
-                                                     TRUE);
+                                  vm_object_collapse(object, (vm_object_offset_t)0);
                           if (!object->pager_initialized)
                                   vm_object_pager_create(object);
                           if (!object->pager_initialized) {
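
For context on the collapse change above: the "-" side's vm_object_collapse() takes a hint offset plus a trailing boolean (named can_bypass in some xnu sources), while the "+" side's signature takes only the object and the hint offset. Caller-side view, assuming that reading of the two signatures:

vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);  /* "-" side: hint + boolean */
vm_object_collapse(object, (vm_object_offset_t) 0);        /* "+" side: hint only      */
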
@@ -2189,6 +2183,7 @@ vm_pageout(void)
 
        vm_object_reaper_init();
 
+
        vm_pageout_continue();
        /*NOTREACHED*/
 }
@@ -2220,7 +2215,6 @@ upl_create(
        upl->size = 0;
        upl->map_object = NULL;
        upl->ref_count = 1;
-       upl->highest_page = 0;
        upl_lock_init(upl);
 #ifdef UPL_DEBUG
        upl->ubc_alias1 = 0;
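
The dropped upl->highest_page initialization above is one piece of a feature removed throughout this diff: on the "-" side, every site that adds a page to a UPL maintains a running maximum of physical page numbers so that upl_get_highest_page() (deleted near the end of this diff) can report it. The matching removals recur below in vm_object_upl_request() and vm_object_iopl_request(), and the idiom is always the same two lines:

/* "-" side only; the "+" side keeps no such field */
if (dst_page->phys_page > upl->highest_page)
	upl->highest_page = dst_page->phys_page;
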
@@ -2399,7 +2393,7 @@ vm_object_upl_request(
                        *page_list_count = MAX_UPL_TRANSFER;
 
        if((!object->internal) && (object->paging_offset != 0))
-               panic("vm_object_upl_request: external object with non-zero paging offset\n");
+               panic("vm_object_upl_request: vnode object with non-zero paging offset\n");
 
        if((cntrl_flags & UPL_COPYOUT_FROM) && (upl_ptr == NULL)) {
                return KERN_SUCCESS;
@@ -2507,7 +2501,6 @@ vm_object_upl_request(
                                   (offset + object->shadow_offset)>>PAGE_SHIFT;
                                user_page_list[0].device = TRUE;
                        }
-                       upl->highest_page = (offset + object->shadow_offset + size - 1)>>PAGE_SHIFT;
 
                        if(page_list_count != NULL) {
                                if (upl->flags & UPL_INTERNAL) {
@@ -2820,9 +2813,6 @@ check_busy:
                                        }
                                }
 
-                               if (dst_page->phys_page > upl->highest_page)
-                                       upl->highest_page = dst_page->phys_page;
-
                                if(user_page_list) {
                                        user_page_list[entry].phys_addr
                                                = dst_page->phys_page;
@@ -3135,10 +3125,6 @@ check_busy:
                                dst_page->precious = 
                                        (cntrl_flags & UPL_PRECIOUS) 
                                                        ? TRUE : FALSE;
-
-                               if (dst_page->phys_page > upl->highest_page)
-                                       upl->highest_page = dst_page->phys_page;
-
                                if(user_page_list) {
                                        user_page_list[entry].phys_addr
                                                = dst_page->phys_page;
@@ -3247,7 +3233,7 @@ vm_fault_list_request(
        int                     page_list_count,
        int                     cntrl_flags)
 {
-       unsigned int            local_list_count;
+       int                     local_list_count;
        upl_page_info_t         *user_page_list;
        kern_return_t           kr;
 
@@ -4706,21 +4692,6 @@ vm_object_iopl_request(
                 */
                return KERN_INVALID_VALUE;
        }
-       if (vm_lopage_poolsize == 0)
-               cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
-
-       if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
-               if ( (cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE))
-                       return KERN_INVALID_VALUE;
-
-               if (object->phys_contiguous) {
-                       if ((offset + object->shadow_offset) >= (vm_object_offset_t)max_valid_dma_address)
-                               return KERN_INVALID_ADDRESS;
-                         
-                       if (((offset + object->shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address)
-                               return KERN_INVALID_ADDRESS;
-               }
-       }
 
        if (cntrl_flags & UPL_ENCRYPT) {
                /*
@@ -4753,7 +4724,7 @@ vm_object_iopl_request(
                return KERN_INVALID_ARGUMENT;
 
        if((!object->internal) && (object->paging_offset != 0))
-               panic("vm_object_upl_request: external object with non-zero paging offset\n");
+               panic("vm_object_upl_request: vnode object with non-zero paging offset\n");
 
        if(object->phys_contiguous) {
                /* No paging operations are possible against this memory */
@@ -4821,7 +4792,6 @@ vm_object_iopl_request(
                                  (offset + object->shadow_offset)>>PAGE_SHIFT;
                                user_page_list[0].device = TRUE;
                        }
-                       upl->highest_page = (offset + object->shadow_offset + size - 1)>>PAGE_SHIFT;
 
                        if(page_list_count != NULL) {
                                if (upl->flags & UPL_INTERNAL) {
@@ -4991,75 +4961,24 @@ vm_object_iopl_request(
                                ret = (error_code ? error_code:
                                        KERN_MEMORY_ERROR);
                                vm_object_lock(object);
-
-                               goto return_err;
+                               for(; offset < dst_offset;
+                                               offset += PAGE_SIZE) {
+                                  dst_page = vm_page_lookup(
+                                               object, offset);
+                                  if(dst_page == VM_PAGE_NULL)
+                                       panic("vm_object_iopl_request: Wired pages missing. \n");
+                                  vm_page_lock_queues();
+                                  vm_page_unwire(dst_page);
+                                  vm_page_unlock_queues();
+                                  VM_STAT(reactivations++);
+                               }
+                               vm_object_unlock(object);
+                               upl_destroy(upl);
+                               return ret;
                        }
                   } while ((result != VM_FAULT_SUCCESS) 
                                || (result == VM_FAULT_INTERRUPTED));
                }
-
-               if ( (cntrl_flags & UPL_NEED_32BIT_ADDR) &&
-                    dst_page->phys_page >= (max_valid_dma_address >> PAGE_SHIFT) ) {
-                       vm_page_t       low_page;
-                       int             refmod;
-
-                       /*
-                        * support devices that can't DMA above 32 bits
-                        * by substituting pages from a pool of low address
-                        * memory for any pages we find above the 4G mark
-                        * can't substitute if the page is already wired because
-                        * we don't know whether that physical address has been
-                        * handed out to some other 64 bit capable DMA device to use
-                        */
-                       if (dst_page->wire_count) {
-                               ret = KERN_PROTECTION_FAILURE;
-                               goto return_err;
-                       }
-                       if (delayed_unlock) {
-                               delayed_unlock = 0;
-                               vm_page_unlock_queues();
-                       }
-                       low_page = vm_page_grablo();
-
-                       if (low_page == VM_PAGE_NULL) {
-                               ret = KERN_RESOURCE_SHORTAGE;
-                               goto return_err;
-                       }
-                       /*
-                        * from here until the vm_page_replace completes
-                        * we mustn't drop the object lock... we don't
-                        * want anyone refaulting this page in and using
-                        * it after we disconnect it... we want the fault
-                        * to find the new page being substituted.
-                        */
-                       refmod = pmap_disconnect(dst_page->phys_page);
-
-                       vm_page_copy(dst_page, low_page);
-                       
-                       low_page->reference = dst_page->reference;
-                       low_page->dirty     = dst_page->dirty;
-
-                       if (refmod & VM_MEM_REFERENCED)
-                               low_page->reference = TRUE;
-                       if (refmod & VM_MEM_MODIFIED)
-                               low_page->dirty = TRUE;
-
-                       vm_page_lock_queues();
-                       vm_page_replace(low_page, object, dst_offset);
-                       /*
-                        * keep the queue lock since we're going to 
-                        * need it immediately
-                        */
-                       delayed_unlock = 1;
-
-                       dst_page = low_page;
-                       /*
-                        * vm_page_grablo returned the page marked
-                        * BUSY... we don't need a PAGE_WAKEUP_DONE
-                        * here, because we've never dropped the object lock
-                        */
-                       dst_page->busy = FALSE;
-               }
                if (delayed_unlock == 0)
                        vm_page_lock_queues();
                vm_page_wire(dst_page);
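
Two separate things happen in the hunk above. First, the "+" side has no UPL_NEED_32BIT_ADDR support: on the "-" side, a device that cannot DMA above max_valid_dma_address gets pages substituted from the vm_page_grablo() low-memory pool whenever a page lands too high. Second, the fault-failure path is inlined on the "+" side where the "-" side jumps to a shared return_err: label (its removal appears further down); apart from the "-" side also calling vm_object_paging_end(), both sides unwind identically. A sketch of that unwind as a standalone helper, using only calls that appear in this file; the wrapper function itself is hypothetical:

static kern_return_t
iopl_unwind(vm_object_t object, vm_object_offset_t offset,
	    vm_object_offset_t dst_offset, upl_t upl, kern_return_t ret)
{
	vm_page_t	dst_page;

	/* every page from the UPL's start up to the fault point was wired */
	for (; offset < dst_offset; offset += PAGE_SIZE) {
		dst_page = vm_page_lookup(object, offset);
		if (dst_page == VM_PAGE_NULL)
			panic("vm_object_iopl_request: Wired pages missing.\n");
		vm_page_lock_queues();          /* wire counts are protected by the page queues */
		vm_page_unwire(dst_page);       /* undo the vm_page_wire() above                */
		vm_page_unlock_queues();
		VM_STAT(reactivations++);       /* unwired pages go back on a paging queue      */
	}
	vm_object_unlock(object);
	upl_destroy(upl);                       /* tear down the half-built UPL */
	return ret;
}
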
@@ -5105,9 +5024,6 @@ vm_object_iopl_request(
                                dst_page->dirty = TRUE;
                        alias_page = NULL;
 
-                       if (dst_page->phys_page > upl->highest_page)
-                               upl->highest_page = dst_page->phys_page;
-
                        if (user_page_list) {
                                user_page_list[entry].phys_addr
                                        = dst_page->phys_page;
@@ -5160,30 +5076,8 @@ vm_object_iopl_request(
        }
 
        return KERN_SUCCESS;
-
-
-return_err:
-       if (delayed_unlock)
-               vm_page_unlock_queues();
-
-       for (; offset < dst_offset; offset += PAGE_SIZE) {
-               dst_page = vm_page_lookup(object, offset);
-
-               if (dst_page == VM_PAGE_NULL)
-                       panic("vm_object_iopl_request: Wired pages missing. \n");
-               vm_page_lock_queues();
-               vm_page_unwire(dst_page);
-               vm_page_unlock_queues();
-               VM_STAT(reactivations++);
-       }
-       vm_object_paging_end(object);
-       vm_object_unlock(object);
-       upl_destroy(upl);
-
-       return ret;
 }
 
-
 kern_return_t
 upl_transpose(
        upl_t           upl1,
@@ -5331,12 +5225,16 @@ vm_paging_map_object(
        vm_map_offset_t         page_map_offset;
        vm_map_size_t           map_size;
        vm_object_offset_t      object_offset;
+#ifdef __ppc__
        int                     i;
        vm_map_entry_t          map_entry;
+#endif /* __ppc__ */
 
 
+#ifdef __ppc__
        if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
                /*
+                * Optimization for the PowerPC.
                 * Use one of the pre-allocated kernel virtual addresses
                 * and just enter the VM page in the kernel address space
                 * at that virtual address.
@@ -5355,7 +5253,6 @@ vm_paging_map_object(
                                               &page_map_offset,
                                               VM_PAGING_NUM_PAGES * PAGE_SIZE,
                                               0,
-                                              0,
                                               &map_entry);
                        if (kr != KERN_SUCCESS) {
                                panic("vm_paging_map_object: "
@@ -5407,17 +5304,14 @@ vm_paging_map_object(
                        }
                        vm_paging_page_inuse[i] = TRUE;
                        simple_unlock(&vm_paging_lock);
-                       if (page->no_isync == TRUE) {
-                               pmap_sync_page_data_phys(page->phys_page);
-                       }
-                       assert(pmap_verify_free(page->phys_page));
-                       PMAP_ENTER(kernel_pmap,
-                                  page_map_offset,
-                                  page,
-                                  VM_PROT_DEFAULT,
-                                  ((int) page->object->wimg_bits &
-                                   VM_WIMG_MASK),
-                                  TRUE);
+                       pmap_map_block(kernel_pmap,
+                                      page_map_offset,
+                                      page->phys_page,
+                                      1,                                               /* Size is number of 4k pages */
+                                      VM_PROT_DEFAULT,
+                                      ((int) page->object->wimg_bits &
+                                       VM_WIMG_MASK),
+                                      0);
                        vm_paging_objects_mapped++;
                        vm_paging_pages_mapped++; 
                        *address = page_map_offset;
@@ -5436,6 +5330,7 @@ vm_paging_map_object(
                simple_unlock(&vm_paging_lock);
                vm_object_lock(object);
        }
+#endif /* __ppc__ */
 
        object_offset = vm_object_trunc_page(offset);
        map_size = vm_map_round_page(*size);
@@ -5493,13 +5388,12 @@ vm_paging_map_object(
                }
                cache_attr = ((unsigned int) object->wimg_bits) & VM_WIMG_MASK;
 
-               assert(pmap_verify_free(page->phys_page));
                PMAP_ENTER(kernel_pmap,
                           *address + page_map_offset,
                           page,
                           VM_PROT_DEFAULT,
                           cache_attr,
-                          TRUE);
+                          FALSE);
        }
                           
        vm_paging_objects_mapped_slow++;
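
In the slow-path hunk above, PMAP_ENTER()'s final argument is the wired flag, so the "+" side enters these mappings unwired where the "-" side wired them (and the "-" side also asserted the physical page was free first). The "+" side call again, annotated under that reading of the macro's parameters:

PMAP_ENTER(kernel_pmap,                 /* map into the kernel pmap          */
	   *address + page_map_offset,  /* VA picked by vm_map_enter() above */
	   page,                        /* physical page being entered       */
	   VM_PROT_DEFAULT,             /* read/write                        */
	   cache_attr,                  /* object's WIMG caching attributes  */
	   FALSE);                      /* not wired on this side            */
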
@@ -5524,9 +5418,11 @@ vm_paging_unmap_object(
        vm_map_offset_t end)
 {
        kern_return_t   kr;
+#ifdef __ppc__
        int             i;
+#endif /* __ppc__ */
 
-       if ((vm_paging_base_address == 0) ||
+       if ((vm_paging_base_address == 0) ||
            (start < vm_paging_base_address) ||
            (end > (vm_paging_base_address
                    + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
@@ -5549,15 +5445,17 @@ vm_paging_unmap_object(
                 * pre-allocated pool.  Put it back in the pool
                 * for next time.
                 */
+#ifdef __ppc__
                assert(end - start == PAGE_SIZE);
                i = (start - vm_paging_base_address) >> PAGE_SHIFT;
 
                /* undo the pmap mapping */
-               pmap_remove(kernel_pmap, start, end);
+               mapping_remove(kernel_pmap, start);
 
                simple_lock(&vm_paging_lock);
                vm_paging_page_inuse[i] = FALSE;
                simple_unlock(&vm_paging_lock);
+#endif /* __ppc__ */
        }
 }
 
@@ -5949,7 +5847,7 @@ vm_page_decrypt(
         * be part of a DMA transfer from a driver that expects the memory to
         * be coherent at this point, we have to flush the data cache.
         */
-       pmap_sync_page_attributes_phys(page->phys_page);
+       pmap_sync_page_data_phys(page->phys_page);
        /*
         * Since the page is not mapped yet, some code might assume that it
         * doesn't need to invalidate the instruction cache when writing to
@@ -6072,16 +5970,18 @@ upl_get_internal_pagelist_offset(void)
        return sizeof(struct upl);
 }
 
+void
+upl_set_dirty(
+       upl_t   upl)
+{
+       upl->flags |= UPL_CLEAR_DIRTY;
+}
+
 void
 upl_clear_dirty(
-       upl_t           upl,
-       boolean_t       value)
+       upl_t   upl)
 {
-       if (value) {
-               upl->flags |= UPL_CLEAR_DIRTY;
-       } else {
-               upl->flags &= ~UPL_CLEAR_DIRTY;
-       }
+       upl->flags &= ~UPL_CLEAR_DIRTY;
 }
 
 
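
The hunk above splits the "-" side's boolean-parameter upl_clear_dirty() into a set/clear pair. Mind the naming on the "+" side: upl_set_dirty() raises the UPL_CLEAR_DIRTY flag, a request that the pages' dirty state be cleared when the UPL is committed; it does not dirty anything. A caller-side sketch of the equivalence:

upl_clear_dirty(upl, TRUE);     /* "-" side: raise UPL_CLEAR_DIRTY */
upl_clear_dirty(upl, FALSE);    /* "-" side: drop  UPL_CLEAR_DIRTY */

upl_set_dirty(upl);             /* "+" side: raise UPL_CLEAR_DIRTY */
upl_clear_dirty(upl);           /* "+" side: drop  UPL_CLEAR_DIRTY */
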
@@ -6176,12 +6076,6 @@ vm_countdirtypages(void)
 }
 #endif /* MACH_BSD */
 
-ppnum_t upl_get_highest_page(
-       upl_t                   upl)
-{
-       return upl->highest_page;
-}
-
 #ifdef UPL_DEBUG
 kern_return_t  upl_ubc_alias_set(upl_t upl, unsigned int alias1, unsigned int alias2)
 {