xnu-2422.90.20.tar.gz
diff --git a/osfmk/vm/bsd_vm.c b/osfmk/vm/bsd_vm.c
index 9b583cd0c4a926f07ddf2eb11bba4fea9091fa29..5d05fa984650fd90b56e9e39a32c0284586abd37 100644
--- a/osfmk/vm/bsd_vm.c
+++ b/osfmk/vm/bsd_vm.c
@@ -92,26 +92,6 @@ mach_get_vm_end(vm_map_t map)
        return( vm_map_last_entry(map)->vme_end);
 }
 
-/*
- * Legacy routines to get the start and end for a vm_map_t.  They
- * return them in the vm_offset_t format.  So, they should only be
- * called on maps that are the same size as the kernel map for
- * accurate results.
- */
-vm_offset_t
-get_vm_start(
-       vm_map_t map)
-{
-       return(CAST_DOWN(vm_offset_t, vm_map_first_entry(map)->vme_start));
-}
-
-vm_offset_t
-get_vm_end(
-       vm_map_t map)
-{
-       return(CAST_DOWN(vm_offset_t, vm_map_last_entry(map)->vme_end));
-}
-
 /* 
  * BSD VNODE PAGER 
  */
@@ -128,6 +108,7 @@ const struct memory_object_pager_ops vnode_pager_ops = {
        vnode_pager_synchronize,
        vnode_pager_map,
        vnode_pager_last_unmap,
+       NULL, /* data_reclaim */
        "vnode pager"
 };
 
@@ -271,7 +252,7 @@ mach_macx_triggers(
                }
                /* trigger_port is locked and active */
                ipc_port_make_send_locked(trigger_port); 
-               /* now unlocked */
+               ip_unlock(trigger_port);
                default_pager_triggers(default_pager,
                                        0, 0, 
                                        SWAP_FILE_CREATION_ERROR,
@@ -285,7 +266,7 @@ mach_macx_triggers(
                }
                /* trigger_port is locked and active */
                ipc_port_make_send_locked(trigger_port); 
-               /* now unlocked */
+               ip_unlock(trigger_port);
                default_pager_triggers(default_pager, 
                                       hi_water, low_water,
                                       HI_WAT_ALERT, trigger_port);
@@ -298,7 +279,7 @@ mach_macx_triggers(
                }
                /* trigger_port is locked and active */
                ipc_port_make_send_locked(trigger_port);
-               /* and now its unlocked */
+               ip_unlock(trigger_port);
                default_pager_triggers(default_pager, 
                                       hi_water, low_water,
                                       LO_WAT_ALERT, trigger_port);
@@ -426,63 +407,8 @@ memory_object_control_uiomove(
                        if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
                                break;
 
-                       /*
-                        * if we're in this routine, we are inside a filesystem's
-                        * locking model, so we don't ever want to wait for pages that have
-                        * list_req_pending == TRUE since it means that the
-                        * page is a candidate for some type of I/O operation,
-                        * but that it has not yet been gathered into a UPL...
-                        * this implies that it is still outside the domain
-                        * of the filesystem and that whoever is responsible for
-                        * grabbing it into a UPL may be stuck behind the filesystem
-                        * lock this thread owns, or trying to take a lock exclusively
-                        * and waiting for the readers to drain from a rw lock...
-                        * if we block in those cases, we will deadlock
-                        */
-                       if (dst_page->list_req_pending) {
 
-                               if (dst_page->absent) {
-                                       /*
-                                        * this is the list_req_pending | absent | busy case
-                                        * which originates from vm_fault_page... we want
-                                        * to fall out of the fast path and go back
-                                        * to the caller which will gather this page
-                                        * into a UPL and issue the I/O if no one
-                                        * else beats us to it
-                                        */
-                                       break;
-                               }
-                               if (dst_page->pageout || dst_page->cleaning) {
-                                       /*
-                                        * this is the list_req_pending | pageout | busy case
-                                        * or the list_req_pending | cleaning case...
-                                        * which originate from the pageout_scan and
-                                        * msync worlds for the pageout case and the hibernate
-                                        * pre-cleaning world for the cleaning case...
-                                        * we need to reset the state of this page to indicate
-                                        * it should stay in the cache marked dirty... nothing else we
-                                        * can do at this point... we can't block on it, we can't busy
-                                        * it and we can't clean it from this routine.
-                                        */
-                                       vm_page_lockspin_queues();
-
-                                       vm_pageout_queue_steal(dst_page, TRUE); 
-                                       vm_page_deactivate(dst_page);
-
-                                       vm_page_unlock_queues();
-                               }
-                               /*
-                                * this is the list_req_pending | cleaning case...
-                                * we can go ahead and deal with this page since
-                                * its ok for us to mark this page busy... if a UPL
-                                * tries to gather this page, it will block until the
-                                * busy is cleared, thus allowing us safe use of the page
-                                * when we're done with it, we will clear busy and wake
-                                * up anyone waiting on it, thus allowing the UPL creation
-                                * to finish
-                                */
-
-                       } else if (dst_page->busy || dst_page->cleaning) {
+                       if (dst_page->busy || dst_page->cleaning) {
                                /*
                                 * someone else is playing with the page... if we've
                                 * already collected pages into this run, go ahead
@@ -495,7 +421,11 @@ memory_object_control_uiomove(
                                PAGE_SLEEP(object, dst_page, THREAD_UNINT);
                                continue;
                        }
-
+                       if (dst_page->laundry) {
+                               dst_page->pageout = FALSE;
+                               
+                               vm_pageout_steal_laundry(dst_page, FALSE);
+                       }
                        /*
                         * this routine is only called when copying
                         * to/from real files... no need to consider
@@ -504,7 +434,7 @@ memory_object_control_uiomove(
                        assert(!dst_page->encrypted);
 
                        if (mark_dirty) {
-                               dst_page->dirty = TRUE;
+                               SET_PAGE_DIRTY(dst_page, FALSE);
                                if (dst_page->cs_validated && 
                                    !dst_page->cs_tainted) {
                                        /*
@@ -602,8 +532,10 @@ vnode_pager_bootstrap(void)
        size = (vm_size_t) sizeof(struct vnode_pager);
        vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
                                PAGE_SIZE, "vnode pager structures");
+       zone_change(vnode_pager_zone, Z_CALLERACCT, FALSE);
        zone_change(vnode_pager_zone, Z_NOENCRYPT, TRUE);
 
+
 #if CONFIG_CODE_DECRYPTION
        apple_protect_pager_bootstrap();
 #endif /* CONFIG_CODE_DECRYPTION */
@@ -733,10 +665,9 @@ vnode_pager_get_isinuse(
 }
 
 kern_return_t
-vnode_pager_check_hard_throttle(
+vnode_pager_get_throttle_io_limit(
        memory_object_t         mem_obj,
-       uint32_t                *limit,
-       uint32_t                hard_throttle)
+       uint32_t                *limit)
 {
        vnode_pager_t   vnode_object;
 
@@ -745,7 +676,23 @@ vnode_pager_check_hard_throttle(
 
        vnode_object = vnode_pager_lookup(mem_obj);
 
-       (void)vnode_pager_return_hard_throttle_limit(vnode_object->vnode_handle, limit, hard_throttle);
+       (void)vnode_pager_return_throttle_io_limit(vnode_object->vnode_handle, limit);
+       return KERN_SUCCESS;
+}
+
+kern_return_t
+vnode_pager_get_isSSD(
+       memory_object_t         mem_obj,
+       boolean_t               *isSSD)
+{
+       vnode_pager_t   vnode_object;
+
+       if (mem_obj->mo_pager_ops != &vnode_pager_ops)
+               return KERN_INVALID_ARGUMENT;
+
+       vnode_object = vnode_pager_lookup(mem_obj);
+
+       *isSSD = vnode_pager_isSSD(vnode_object->vnode_handle);
        return KERN_SUCCESS;
 }
 
@@ -768,10 +715,13 @@ vnode_pager_get_object_size(
 }
 
 kern_return_t
-vnode_pager_get_object_pathname(
+vnode_pager_get_object_name(
        memory_object_t         mem_obj,
        char                    *pathname,
-       vm_size_t               *length_p)
+       vm_size_t               pathname_len,
+       char                    *filename,
+       vm_size_t               filename_len,
+       boolean_t               *truncated_path_p)
 {
        vnode_pager_t   vnode_object;
 
@@ -781,15 +731,19 @@ vnode_pager_get_object_pathname(
 
        vnode_object = vnode_pager_lookup(mem_obj);
 
-       return vnode_pager_get_pathname(vnode_object->vnode_handle,
-                                       pathname,
-                                       length_p);
+       return vnode_pager_get_name(vnode_object->vnode_handle,
+                                   pathname,
+                                   pathname_len,
+                                   filename,
+                                   filename_len,
+                                   truncated_path_p);
 }
 
 kern_return_t
-vnode_pager_get_object_filename(
-       memory_object_t mem_obj,
-       const char      **filename)
+vnode_pager_get_object_mtime(
+       memory_object_t         mem_obj,
+       struct timespec         *mtime,
+       struct timespec         *cs_mtime)
 {
        vnode_pager_t   vnode_object;
 
@@ -799,8 +753,9 @@ vnode_pager_get_object_filename(
 
        vnode_object = vnode_pager_lookup(mem_obj);
 
-       return vnode_pager_get_filename(vnode_object->vnode_handle,
-                                       filename);
+       return vnode_pager_get_mtime(vnode_object->vnode_handle,
+                                    mtime,
+                                    cs_mtime);
 }
 
 kern_return_t
@@ -821,6 +776,25 @@ vnode_pager_get_object_cs_blobs(
                                        blobs);
 }
 
+#if CHECK_CS_VALIDATION_BITMAP
+kern_return_t
+vnode_pager_cs_check_validation_bitmap( 
+       memory_object_t mem_obj, 
+       memory_object_offset_t  offset,
+        int            optype  )
+{
+       vnode_pager_t   vnode_object;
+
+       if (mem_obj == MEMORY_OBJECT_NULL ||
+           mem_obj->mo_pager_ops != &vnode_pager_ops) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       vnode_object = vnode_pager_lookup(mem_obj);
+       return ubc_cs_check_validation_bitmap( vnode_object->vnode_handle, offset, optype );
+}
+#endif /* CHECK_CS_VALIDATION_BITMAP */
+
 /*
  *
  */