git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/vm/memory_object.c (xnu-3789.70.16)
index de7baff299a1821cc16e4cf6e17ac9bc2c7c1c41..d4bf4dcd8ce78e7934c1ab6b1c6b3d16a44bf763 100644
@@ -62,8 +62,6 @@
  *     External memory management interface control functions.
  */
 
-#include <advisory_pageout.h>
-
 /*
  *     Interface dependencies:
  */
 #include <vm/vm_purgeable_internal.h>  /* Needed by some vm_page.h macros */
 #include <vm/vm_shared_region.h>
 
-#if    MACH_PAGEMAP
 #include <vm/vm_external.h>
-#endif /* MACH_PAGEMAP */
 
 #include <vm/vm_protos.h>
 
-
 memory_object_default_t        memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
 decl_lck_mtx_data(,    memory_manager_default_lock)
 
@@ -134,7 +129,7 @@ decl_lck_mtx_data(, memory_manager_default_lock)
 
 #define        memory_object_should_return_page(m, should_return) \
     (should_return != MEMORY_OBJECT_RETURN_NONE && \
-     (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \
+     (((m)->dirty || ((m)->dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m)))) || \
       ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
       (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
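
A pattern repeated throughout this diff: direct reads of m->phys_page are replaced by the VM_PAGE_GET_PHYS_PAGE() accessor. A minimal sketch of the change, assuming the accessor simply yields the page's ppnum_t as the hunk above uses it:

    ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(m);    /* was: m->phys_page            */
    if (pmap_is_modified(pn))                 /* hardware modified bit set?   */
            m->dirty = TRUE;                  /* fold it into the page state,
                                                 as the macro above does      */
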
 
@@ -176,57 +171,12 @@ memory_object_lock_page(
             m, should_return, should_flush, prot, 0);
 
 
-       if (m->busy || m->cleaning) {
-               if (m->list_req_pending &&
-                   should_return == MEMORY_OBJECT_RETURN_NONE &&
-                   should_flush == TRUE) {
+       if (m->busy || m->cleaning)
+               return (MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
 
-                       if (m->absent) {
-                               /*
-                                * this is the list_req_pending | absent | busy case
-                                * which originates from vm_fault_page. 
-                                * Combine that with should_flush == TRUE and we
-                                * have a case where we need to toss the page from
-                                * the object.
-                                */
-                               if (!VM_PAGE_WIRED(m)) {
-                                       return (MEMORY_OBJECT_LOCK_RESULT_MUST_FREE);
-                               } else {
-                                       return (MEMORY_OBJECT_LOCK_RESULT_DONE);
-                               }
-                       }
-                       if  (m->pageout || m->cleaning) {
-                               /*
-                                * if pageout is set, page was earmarked by vm_pageout_scan
-                                * to be cleaned and stolen... if cleaning is set, we're
-                                * pre-cleaning pages for a hibernate...
-                                * in either case, we're going
-                                * to take it back since we are being asked to
-                                * flush the page w/o cleaning it (i.e. we don't
-                                * care that it's dirty, we want it gone from
-                                * the cache) and we don't want to stall
-                                * waiting for it to be cleaned for 2 reasons...
-                                * 1 - no use paging it out since we're probably
-                                *     shrinking the file at this point or we no
-                                *     longer care about the data in the page
-                                * 2 - if we stall, we may cause a deadlock in
-                                *     the FS trying to acquire its locks
-                                *     on the VNOP_PAGEOUT path presuming that
-                                *     those locks are already held on the truncate
-                                *     path before calling through to this function
-                                *
-                                * so undo all of the state that vm_pageout_scan
-                                * hung on this page
-                                */
+       if (m->laundry)
+               vm_pageout_steal_laundry(m, FALSE);
 
-                               vm_pageout_queue_steal(m, FALSE);
-                               PAGE_WAKEUP_DONE(m);
-                       } else {
-                               panic("list_req_pending on page %p without absent/pageout/cleaning set\n", m);
-                       }
-               } else
-                       return (MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
-       }
        /*
         *      Don't worry about pages for which the kernel
         *      does not have any data.
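
memory_object_lock_page() now signals everything through its return value: busy or in-cleaning pages simply report MUST_BLOCK, and laundry pages are reclaimed from the pageout queues with vm_pageout_steal_laundry() before the normal checks run. A sketch of how a caller reacts to the four results; the constant names come from the hunks in this file, but the exact caller logic lives in vm_object_update_extent():

    switch (memory_object_lock_page(m, should_return, should_flush, prot)) {
    case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
            /* page is busy or being cleaned: wait for it and retry later */
            break;
    case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
            /* page can be tossed without writing it back */
            break;
    case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
            /* dirty or precious contents must be pushed back to the pager */
            break;
    case MEMORY_OBJECT_LOCK_RESULT_DONE:
            /* nothing further to do for this page */
            break;
    }
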
@@ -262,8 +212,9 @@ memory_object_lock_page(
                 * for the page to go from the clean to the dirty state
                 * after we've made our decision
                 */
-               if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED)
-                       m->dirty = TRUE;
+               if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
+                       SET_PAGE_DIRTY(m, FALSE);
+               }
        } else {
                /*
                 * If we are decreasing permission, do it now;
@@ -271,7 +222,7 @@ memory_object_lock_page(
                 * (pmap_page_protect may not increase protection).
                 */
                if (prot != VM_PROT_NO_CHANGE)
-                       pmap_page_protect(m->phys_page, VM_PROT_ALL & ~prot);
+                       pmap_page_protect(VM_PAGE_GET_PHYS_PAGE(m), VM_PROT_ALL & ~prot);
        }
        /*
         *      Handle returning dirty or precious pages
@@ -287,7 +238,7 @@ memory_object_lock_page(
                 * faulted back into an address space
                 *
                 *      if (!should_flush)
-                *              pmap_disconnect(m->phys_page);
+                *              pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
                 */
                return (MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
        }
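
Condensing the three hunks above, the flush and no-flush paths now look like this (editorial sketch using only the calls shown above):

    if (should_flush) {
            /* tear down all mappings; fold the hardware modified bit into
             * the page before deciding whether it must be returned */
            if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED)
                    SET_PAGE_DIRTY(m, FALSE);
    } else if (prot != VM_PROT_NO_CHANGE) {
            /* no flush: only reduce protection (it is never increased here) */
            pmap_page_protect(VM_PAGE_GET_PHYS_PAGE(m), VM_PROT_ALL & ~prot);
    }
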
@@ -498,9 +449,17 @@ vm_object_sync(
        vm_object_lock(object);
        vm_object_paging_begin(object);
 
-       if (should_flush)
+       if (should_flush) {
                flags = MEMORY_OBJECT_DATA_FLUSH;
-       else
+               /*
+                * This flush is from an msync(), not a truncate(), so the
+                * contents of the file are not affected.
+                * MEMORY_OBJECT_DATA_NO_CHANGE lets vm_object_update() know
+                * that the data is not changed and that there's no need to
+                * push the old contents to a copy object.
+                */
+               flags |= MEMORY_OBJECT_DATA_NO_CHANGE;
+       } else
                flags = 0;
 
        if (should_iosync)
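
The net effect on the flags vm_object_sync() hands to vm_object_update() can be summarized as below; the should_iosync branch is cut off by the hunk, so the MEMORY_OBJECT_IO_SYNC line is an assumption about the code that follows it:

    flags = 0;
    if (should_flush)
            flags = MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE;
    if (should_iosync)
            flags |= MEMORY_OBJECT_IO_SYNC;   /* assumed continuation of the hunk */
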
@@ -527,7 +486,7 @@ MACRO_BEGIN                                                         \
         int                    upl_flags;                              \
        memory_object_t         pager;                                  \
                                                                        \
-       if (object == slide_info.slide_object) {                                        \
+       if (object->object_slid) {                                      \
                panic("Objects with slid pages not allowed\n");         \
        }                                                               \
                                                                        \
@@ -554,7 +513,8 @@ MACRO_BEGIN                                                         \
        }                                                               \
 MACRO_END
 
-
+extern struct vnode *
+vnode_pager_lookup_vnode(memory_object_t);
 
 static int
 vm_object_update_extent(
@@ -578,10 +538,12 @@ vm_object_update_extent(
        struct vm_page_delayed_work     *dwp;
        int             dw_count;
        int             dw_limit;
+       int             dirty_count;
 
         dwp = &dw_array[0];
         dw_count = 0;
        dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+       dirty_count = 0;
 
        for (;
             offset < offset_end && object->resident_page_count;
@@ -589,13 +551,13 @@ vm_object_update_extent(
 
                /*
                 * Limit the number of pages to be cleaned at once to a contiguous
-                * run, or at most MAX_UPL_TRANSFER size
+                * run, or at most MAX_UPL_TRANSFER_BYTES
                 */
                if (data_cnt) {
-                       if ((data_cnt >= PAGE_SIZE * MAX_UPL_TRANSFER) || (next_offset != offset)) {
+                       if ((data_cnt >= MAX_UPL_TRANSFER_BYTES) || (next_offset != offset)) {
 
                                if (dw_count) {
-                                       vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+                                       vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
                                        dwp = &dw_array[0];
                                        dw_count = 0;
                                }
@@ -615,7 +577,7 @@ vm_object_update_extent(
                                 *      End of a run of dirty/precious pages.
                                 */
                                if (dw_count) {
-                                       vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+                                       vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
                                        dwp = &dw_array[0];
                                        dw_count = 0;
                                }
@@ -636,6 +598,8 @@ vm_object_update_extent(
                                break;
 
                        case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
+                               if (m->dirty == TRUE)
+                                       dirty_count++;
                                dwp->dw_mask |= DW_vm_page_free;
                                break;
 
@@ -650,12 +614,6 @@ vm_object_update_extent(
                                data_cnt += PAGE_SIZE;
                                next_offset = offset + PAGE_SIZE_64;
 
-                               /*
-                                * Clean
-                                */
-                               m->list_req_pending = TRUE;
-                               m->cleaning = TRUE;
-
                                /*
                                 * wired pages shouldn't be flushed and
                                 * since they aren't on any queue,
@@ -667,10 +625,7 @@ vm_object_update_extent(
                                                /*
                                                 * add additional state for the flush
                                                 */
-                                               m->busy = TRUE;
-                                               m->pageout = TRUE;
-
-                                               dwp->dw_mask |= DW_vm_page_wire;
+                                               m->free_when_done = TRUE;
                                        }
                                        /*
                                         * we used to remove the page from the queues at this
@@ -688,7 +643,7 @@ vm_object_update_extent(
                                VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
 
                                if (dw_count >= dw_limit) {
-                                       vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+                                       vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
                                        dwp = &dw_array[0];
                                        dw_count = 0;
                                }
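
The dw_array batching seen throughout this loop defers per-page queue work and applies it in chunks; the VM_KERN_MEMORY_NONE argument added to vm_page_do_delayed_work() in this version is an accounting tag. A condensed sketch of the idiom, built only from the calls visible in the hunks above:

    dwp = &dw_array[0];
    dw_count = 0;
    dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

    /* for each page m: record the deferred operation ... */
    dwp->dw_mask |= DW_vm_page_free;                /* or other DW_* bits  */
    VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

    if (dw_count >= dw_limit) {                     /* batch full: apply it */
            vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE,
                                    &dw_array[0], dw_count);
            dwp = &dw_array[0];
            dw_count = 0;
    }
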
@@ -696,12 +651,15 @@ vm_object_update_extent(
                        break;
                }
        }
+       
+       if (object->pager)
+               task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_INVALIDATED, vnode_pager_lookup_vnode(object->pager));
        /*
         *      We have completed the scan for applicable pages.
         *      Clean any pages that have been saved.
         */
        if (dw_count)
-               vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+               vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
 
        if (data_cnt) {
                LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
@@ -851,6 +809,7 @@ vm_object_update(
                fault_info.interruptible = THREAD_UNINT;
                fault_info.behavior  = VM_BEHAVIOR_SEQUENTIAL;
                fault_info.user_tag  = 0;
+               fault_info.pmap_options = 0;
                fault_info.lo_offset = copy_offset;
                fault_info.hi_offset = copy_size;
                fault_info.no_cache   = FALSE;
@@ -858,6 +817,7 @@ vm_object_update(
                fault_info.io_sync = FALSE;
                fault_info.cs_bypass = FALSE;
                fault_info.mark_zf_absent = FALSE;
+               fault_info.batch_pmap_op = FALSE;
 
                vm_object_paging_begin(copy_object);
 
@@ -867,9 +827,11 @@ vm_object_update(
                        assert(fault_info.cluster_size == copy_size - i);
 
                        prot =  VM_PROT_WRITE|VM_PROT_READ;
+                       page = VM_PAGE_NULL;
                        result = vm_fault_page(copy_object, i, 
                                               VM_PROT_WRITE|VM_PROT_READ,
                                               FALSE,
+                                              FALSE, /* page not looked up */
                                               &prot,
                                               &page,
                                               &top_page,
@@ -882,18 +844,17 @@ vm_object_update(
                        case VM_FAULT_SUCCESS:
                                if (top_page) {
                                        vm_fault_cleanup(
-                                               page->object, top_page);
+                                               VM_PAGE_OBJECT(page), top_page);
                                        vm_object_lock(copy_object);
                                        vm_object_paging_begin(copy_object);
                                }
-                               if (!page->active &&
-                                   !page->inactive &&
-                                   !page->throttled) {
+                               if (( !VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) {
+
                                        vm_page_lockspin_queues();
-                                       if (!page->active &&
-                                           !page->inactive &&
-                                           !page->throttled)
+                                       
+                                       if (( !VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) {
                                                vm_page_deactivate(page);
+                                       }
                                        vm_page_unlock_queues();
                                }
                                PAGE_WAKEUP_DONE(page);
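
The queue check around vm_page_deactivate() above is the usual unlocked-check / locked-recheck idiom: the first VM_PAGE_NON_SPECULATIVE_PAGEABLE() test avoids taking the page-queues lock for pages that are already pageable, and the test is repeated under the lock in case the state changed in between. A stripped-down sketch of just that idiom:

    if (!VM_PAGE_NON_SPECULATIVE_PAGEABLE(page)) {        /* cheap, unlocked    */
            vm_page_lockspin_queues();
            if (!VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))  /* recheck under lock */
                    vm_page_deactivate(page);
            vm_page_unlock_queues();
    }
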
@@ -942,6 +903,7 @@ vm_object_update(
        }
        if (copy_object != VM_OBJECT_NULL && copy_object != object) {
                if ((flags & MEMORY_OBJECT_DATA_PURGE)) {
+                       vm_object_lock_assert_exclusive(copy_object);
                        copy_object->shadow_severed = TRUE;
                        copy_object->shadowed = FALSE;
                        copy_object->shadow = NULL;
@@ -993,10 +955,10 @@ BYPASS_COW_COPYIN:
                num_of_extents = 0;
                e_mask = ~((vm_object_size_t)(EXTENT_SIZE - 1));
 
-               m = (vm_page_t) queue_first(&object->memq);
+               m = (vm_page_t) vm_page_queue_first(&object->memq);
 
-               while (!queue_end(&object->memq, (queue_entry_t) m)) {
-                       next = (vm_page_t) queue_next(&m->listq);
+               while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) m)) {
+                       next = (vm_page_t) vm_page_queue_next(&m->listq);
 
                        if ((m->offset >= start) && (m->offset < end)) {
                                /*
@@ -1148,7 +1110,7 @@ vm_object_set_attributes_common(
        boolean_t       may_cache,
        memory_object_copy_strategy_t copy_strategy,
        boolean_t       temporary,
-        boolean_t      silent_overwrite,
+       __unused boolean_t      silent_overwrite,
        boolean_t       advisory_pageout)
 {
        boolean_t       object_became_ready;
@@ -1172,11 +1134,6 @@ vm_object_set_attributes_common(
                        return(KERN_INVALID_ARGUMENT);
        }
 
-#if    !ADVISORY_PAGEOUT
-       if (silent_overwrite || advisory_pageout)
-               return(KERN_INVALID_ARGUMENT);
-
-#endif /* !ADVISORY_PAGEOUT */
        if (may_cache)
                may_cache = TRUE;
        if (temporary)
@@ -1192,7 +1149,7 @@ vm_object_set_attributes_common(
        object->copy_strategy = copy_strategy;
        object->can_persist = may_cache;
        object->temporary = temporary;
-       object->silent_overwrite = silent_overwrite;
+//     object->silent_overwrite = silent_overwrite;
        object->advisory_pageout = advisory_pageout;
 
        /*
@@ -1243,7 +1200,8 @@ memory_object_change_attributes(
        temporary = object->temporary;
        may_cache = object->can_persist;
        copy_strategy = object->copy_strategy;
-       silent_overwrite = object->silent_overwrite;
+//     silent_overwrite = object->silent_overwrite;
+       silent_overwrite = FALSE;
        advisory_pageout = object->advisory_pageout;
 #if notyet
        invalidate = object->invalidate;
@@ -1423,7 +1381,8 @@ memory_object_get_attributes(
                behave->invalidate = FALSE;
 #endif
                behave->advisory_pageout = object->advisory_pageout;
-               behave->silent_overwrite = object->silent_overwrite;
+//             behave->silent_overwrite = object->silent_overwrite;
+               behave->silent_overwrite = FALSE;
                 *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
                break;
            }
@@ -1500,11 +1459,11 @@ memory_object_iopl_request(
        upl_t                   *upl_ptr,
        upl_page_info_array_t   user_page_list,
        unsigned int            *page_list_count,
-       int                     *flags)
+       upl_control_flags_t     *flags)
 {
        vm_object_t             object;
        kern_return_t           ret;
-       int                     caller_flags;
+       upl_control_flags_t     caller_flags;
 
        caller_flags = *flags;
 
@@ -1547,8 +1506,9 @@ memory_object_iopl_request(
                /* offset from beginning of named entry offset in object */
                offset = offset + named_entry->offset;
 
-               if(named_entry->is_sub_map) 
-                       return (KERN_INVALID_ARGUMENT);
+               if (named_entry->is_sub_map ||
+                   named_entry->is_copy)
+                       return KERN_INVALID_ARGUMENT;
                
                named_entry_lock(named_entry);
 
@@ -1607,8 +1567,6 @@ memory_object_iopl_request(
                return (KERN_INVALID_ARGUMENT);
 
        if (!object->private) {
-               if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
-                       *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
                if (object->phys_contiguous) {
                        *flags = UPL_PHYS_CONTIG;
                } else {
@@ -1660,7 +1618,7 @@ memory_object_upl_request(
                                     upl_ptr,
                                     user_page_list,
                                     page_list_count,
-                                    cntrl_flags);
+                                    (upl_control_flags_t)(unsigned int) cntrl_flags);
 }
 
 /*  
@@ -1698,7 +1656,7 @@ memory_object_super_upl_request(
                                           upl,
                                           user_page_list,
                                           page_list_count,
-                                          cntrl_flags);
+                                          (upl_control_flags_t)(unsigned int) cntrl_flags);
 }
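
Both call sites above widen the 32-bit cntrl_flags through an intermediate (unsigned int) cast before converting to upl_control_flags_t. Assuming upl_control_flags_t is wider than int (as the new iopl prototype suggests), the intermediate cast keeps a flag that happens to occupy the sign bit from being sign-extended into the upper half. A small standalone illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int flags = (int)0x80000000;    /* a flag bit in the sign position */

            printf("%llx\n", (unsigned long long)(uint64_t)flags);
            /* ffffffff80000000 : sign-extended                                */
            printf("%llx\n", (unsigned long long)(uint64_t)(unsigned int)flags);
            /* 80000000         : zero-extended, as the cast above intends     */
            return 0;
    }
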
 
 kern_return_t
@@ -1764,6 +1722,14 @@ host_default_memory_manager(
                returned_manager = current_manager;
                memory_object_default_reference(returned_manager);
        } else {
+               /*
+                *      Only allow the kernel to change the value.
+                */
+               extern task_t kernel_task;
+               if (current_task() != kernel_task) {
+                       result = KERN_NO_ACCESS;
+                       goto out;
+               }
 
                /*
                 *      If this is the first non-null manager, start
@@ -1793,7 +1759,6 @@ host_default_memory_manager(
 
                thread_wakeup((event_t) &memory_manager_default);
 
-#ifndef CONFIG_FREEZE
                /*
                 * Now that we have a default pager for anonymous memory,
                 * reactivate all the throttled pages (i.e. dirty pages with
@@ -1803,7 +1768,6 @@ host_default_memory_manager(
                {
                        vm_page_reactivate_all_throttled();
                }
-#endif
        }
  out:
        lck_mtx_unlock(&memory_manager_default_lock);
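
The new guard in host_default_memory_manager() means only the kernel itself may install or replace the default pager; other tasks can still query the current one (the query path is the unchanged branch above this hunk). A minimal sketch of the added check:

    extern task_t kernel_task;

    if (current_task() != kernel_task) {
            /* non-kernel callers may look up the default manager,
             * but may no longer change it */
            result = KERN_NO_ACCESS;
            goto out;
    }
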
@@ -1969,6 +1933,57 @@ memory_object_mark_unused(
                vm_object_cache_add(object);
 }
 
+void
+memory_object_mark_io_tracking(
+       memory_object_control_t control)
+{
+       vm_object_t             object;
+
+       if (control == NULL)
+               return;
+       object = memory_object_control_to_vm_object(control);
+
+       if (object != VM_OBJECT_NULL) {
+               vm_object_lock(object);
+               object->io_tracking = TRUE;
+               vm_object_unlock(object);
+       }
+}
+
+#if CONFIG_SECLUDED_MEMORY
+void
+memory_object_mark_eligible_for_secluded(
+       memory_object_control_t control,
+       boolean_t               eligible_for_secluded)
+{
+       vm_object_t             object;
+
+       if (control == NULL)
+               return;
+       object = memory_object_control_to_vm_object(control);
+
+       if (object == VM_OBJECT_NULL) {
+               return;
+       }
+
+       vm_object_lock(object);
+       if (eligible_for_secluded &&
+           secluded_for_filecache && /* global boot-arg */
+           !object->eligible_for_secluded) {
+               object->eligible_for_secluded = TRUE;
+               vm_page_secluded.eligible_for_secluded += object->resident_page_count;
+       } else if (!eligible_for_secluded &&
+                  object->eligible_for_secluded) {
+               object->eligible_for_secluded = FALSE;
+               vm_page_secluded.eligible_for_secluded -= object->resident_page_count;
+               if (object->resident_page_count) {
+                       /* XXX FBDP TODO: flush pages from secluded queue? */
+                       // printf("FBDP TODO: flush %d pages from %p from secluded queue\n", object->resident_page_count, object);
+               }
+       }
+       vm_object_unlock(object);
+}
+#endif /* CONFIG_SECLUDED_MEMORY */
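
A hypothetical usage sketch for the two new markers; the control handle, its origin, and the caller are assumptions, only the two calls come from the functions added above. Secluded-pool eligibility additionally requires a kernel built with CONFIG_SECLUDED_MEMORY and the secluded_for_filecache boot-arg:

    /* `control` is the memory_object_control_t for a file-backed object */
    memory_object_mark_io_tracking(control);
    #if CONFIG_SECLUDED_MEMORY
    memory_object_mark_eligible_for_secluded(control, TRUE);
    #endif
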
 
 kern_return_t
 memory_object_pages_resident(
@@ -2007,18 +2022,35 @@ memory_object_signed(
        return KERN_SUCCESS;
 }
 
+boolean_t
+memory_object_is_signed(
+       memory_object_control_t control)
+{
+       boolean_t       is_signed;
+       vm_object_t     object;
+
+       object = memory_object_control_to_vm_object(control);
+       if (object == VM_OBJECT_NULL)
+               return FALSE;
+
+       vm_object_lock_shared(object);
+       is_signed = object->code_signed;
+       vm_object_unlock(object);
+
+       return is_signed;
+}
+
 boolean_t
 memory_object_is_slid(
        memory_object_control_t control)
 {
        vm_object_t     object = VM_OBJECT_NULL;
-       vm_object_t     slide_object = slide_info.slide_object;
 
        object = memory_object_control_to_vm_object(control);
        if (object == VM_OBJECT_NULL)
                return FALSE;
 
-       return (object == slide_object);
+       return object->object_slid;
 }
 
 static zone_t mem_obj_control_zone;
@@ -2326,19 +2358,6 @@ kern_return_t memory_object_data_reclaim
                reclaim_backing_store);
 }
 
-/* Routine memory_object_create */
-kern_return_t memory_object_create
-(
-       memory_object_default_t default_memory_manager,
-       vm_size_t new_memory_object_size,
-       memory_object_t *new_memory_object
-)
-{
-       return default_pager_memory_object_create(default_memory_manager,
-                                                 new_memory_object_size,
-                                                 new_memory_object);
-}
-
 upl_t
 convert_port_to_upl(
        ipc_port_t      port)