apple/xnu.git: osfmk/vm/memory_object.c (xnu-4570.20.62)
diff --git a/osfmk/vm/memory_object.c b/osfmk/vm/memory_object.c
index e8ace8c9b90fb0cc128c1657ee7947afe6a8700b..d37eb42243d5df1977a7787f8dda6207e331ff2a 100644
--- a/osfmk/vm/memory_object.c
+++ b/osfmk/vm/memory_object.c
@@ -129,7 +129,7 @@ decl_lck_mtx_data(, memory_manager_default_lock)
 
 #define        memory_object_should_return_page(m, should_return) \
     (should_return != MEMORY_OBJECT_RETURN_NONE && \
-     (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \
+     (((m)->dirty || ((m)->dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m)))) || \
       ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
       (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
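
The change above is the first of several in this diff that retire direct
reads of m->phys_page in favor of the VM_PAGE_GET_PHYS_PAGE() accessor:
from this xnu generation on, the physical page number is reached through
a macro so that struct vm_page can be repacked without touching every
caller. A minimal sketch of the accessor pattern, with hypothetical
simplified types (not the kernel's actual layout):

typedef unsigned int ppnum_t;            /* physical page number */

struct vm_page_with_ppnum {
        ppnum_t vmp_phys_page;           /* only this flavor stores it */
};

/* the accessor hides where the field actually lives */
#define VM_PAGE_GET_PHYS_PAGE(m) \
        (((struct vm_page_with_ppnum *)(m))->vmp_phys_page)

Callers such as pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m)) then compile
unchanged if the representation moves again.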
 
@@ -212,7 +212,7 @@ memory_object_lock_page(
                 * for the page to go from the clean to the dirty state
                 * after we've made our decision
                 */
-               if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED) {
+               if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
                        SET_PAGE_DIRTY(m, FALSE);
                }
        } else {
@@ -222,7 +222,7 @@ memory_object_lock_page(
                 * (pmap_page_protect may not increase protection).
                 */
                if (prot != VM_PROT_NO_CHANGE)
-                       pmap_page_protect(m->phys_page, VM_PROT_ALL & ~prot);
+                       pmap_page_protect(VM_PAGE_GET_PHYS_PAGE(m), VM_PROT_ALL & ~prot);
        }
        /*
         *      Handle returning dirty or precious pages
@@ -238,7 +238,7 @@ memory_object_lock_page(
                 * faulted back into an address space
                 *
                 *      if (!should_flush)
-                *              pmap_disconnect(m->phys_page);
+                *              pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
                 */
                return (MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
        }
@@ -513,7 +513,8 @@ MACRO_BEGIN                                                         \
        }                                                               \
 MACRO_END
 
-
+extern struct vnode *
+vnode_pager_lookup_vnode(memory_object_t);
 
 static int
 vm_object_update_extent(
@@ -537,10 +538,12 @@ vm_object_update_extent(
        struct vm_page_delayed_work     *dwp;
        int             dw_count;
        int             dw_limit;
+       int             dirty_count;
 
         dwp = &dw_array[0];
         dw_count = 0;
        dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+       dirty_count = 0;
 
        for (;
             offset < offset_end && object->resident_page_count;
@@ -595,6 +598,8 @@ vm_object_update_extent(
                                break;
 
                        case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
+                               if (m->dirty == TRUE)
+                                       dirty_count++;
                                dwp->dw_mask |= DW_vm_page_free;
                                break;
 
@@ -620,7 +625,7 @@ vm_object_update_extent(
                                                /*
                                                 * add additional state for the flush
                                                 */
-                                               m->pageout = TRUE;
+                                               m->free_when_done = TRUE;
                                        }
                                        /*
                                         * we use to remove the page from the queues at this
@@ -646,6 +651,9 @@ vm_object_update_extent(
                        break;
                }
        }
+       
+       if (object->pager)
+               task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_INVALIDATED, vnode_pager_lookup_vnode(object->pager));
        /*
         *      We have completed the scan for applicable pages.
         *      Clean any pages that have been saved.
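
The additions in this hunk and the two before it work together:
vm_object_update_extent() now tallies every dirty page it is about to
free, and when the scan completes it charges those pages to the current
task as invalidated logical writes against the pager's vnode (resolved
via the newly declared vnode_pager_lookup_vnode()). A sketch of the
accounting shape with stub types; only the single-call-after-the-scan
structure is taken from the hunk, the rest is illustrative:

#include <stdio.h>

#define PAGE_SIZE 4096

struct page { int dirty; };

/* hypothetical stand-in for task_update_logical_writes() */
static void bill_invalidated_writes(long io_size)
{
        printf("invalidated %ld bytes of dirty data\n", io_size);
}

static void free_extent(struct page *pages, int npages)
{
        int dirty_count = 0;

        for (int i = 0; i < npages; i++) {
                if (pages[i].dirty)
                        dirty_count++;   /* dropped, never written back */
                /* ... page would be freed here ... */
        }
        /* one accounting call after the scan, not one per page */
        bill_invalidated_writes((long)dirty_count * PAGE_SIZE);
}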
@@ -836,18 +844,17 @@ vm_object_update(
                        case VM_FAULT_SUCCESS:
                                if (top_page) {
                                        vm_fault_cleanup(
-                                               page->object, top_page);
+                                               VM_PAGE_OBJECT(page), top_page);
                                        vm_object_lock(copy_object);
                                        vm_object_paging_begin(copy_object);
                                }
-                               if (!page->active &&
-                                   !page->inactive &&
-                                   !page->throttled) {
+                               if (( !VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) {
+
                                        vm_page_lockspin_queues();
-                                       if (!page->active &&
-                                           !page->inactive &&
-                                           !page->throttled)
+                                       
+                                       if (( !VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) {
                                                vm_page_deactivate(page);
+                                       }
                                        vm_page_unlock_queues();
                                }
                                PAGE_WAKEUP_DONE(page);
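
The old triple of flag tests (active/inactive/throttled) collapses into
the single VM_PAGE_NON_SPECULATIVE_PAGEABLE() predicate, but the locking
shape is unchanged: an unlocked peek, then vm_page_lockspin_queues(),
then a recheck, because the page may have been queued between the two
tests. The same double-checked pattern in a generic, self-contained form
(a pthread mutex standing in for the kernel's queue spinlock):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

struct page { bool on_pageable_queue; };

/* caller must hold queue_lock */
static void deactivate(struct page *p)
{
        p->on_pageable_queue = true;     /* moved to the inactive queue */
}

static void maybe_deactivate(struct page *p)
{
        if (!p->on_pageable_queue) {            /* cheap unlocked peek */
                pthread_mutex_lock(&queue_lock);
                if (!p->on_pageable_queue)      /* recheck under lock */
                        deactivate(p);
                pthread_mutex_unlock(&queue_lock);
        }
}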
@@ -896,6 +903,7 @@ vm_object_update(
        }
        if (copy_object != VM_OBJECT_NULL && copy_object != object) {
                if ((flags & MEMORY_OBJECT_DATA_PURGE)) {
+                       vm_object_lock_assert_exclusive(copy_object);
                        copy_object->shadow_severed = TRUE;
                        copy_object->shadowed = FALSE;
                        copy_object->shadow = NULL;
@@ -947,10 +955,10 @@ BYPASS_COW_COPYIN:
                num_of_extents = 0;
                e_mask = ~((vm_object_size_t)(EXTENT_SIZE - 1));
 
-               m = (vm_page_t) queue_first(&object->memq);
+               m = (vm_page_t) vm_page_queue_first(&object->memq);
 
-               while (!queue_end(&object->memq, (queue_entry_t) m)) {
-                       next = (vm_page_t) queue_next(&m->listq);
+               while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) m)) {
+                       next = (vm_page_t) vm_page_queue_next(&m->listq);
 
                        if ((m->offset >= start) && (m->offset < end)) {
                                /*
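
Here the queue_first/queue_end/queue_next calls become their
vm_page_queue_* counterparts because the object's page list moved to
packed queue pointers; the traversal idiom itself is untouched. The
loop's shape in plain list terms, with illustrative types and "next"
latched before the current page can be unlinked from under the walk:

#include <stddef.h>

struct vm_page_s {
        struct vm_page_s *next;          /* packed pointer in the kernel */
        unsigned long     offset;
};

/* count resident pages inside [start, end) */
static int pages_in_range(struct vm_page_s *memq,
                          unsigned long start, unsigned long end)
{
        int count = 0;

        for (struct vm_page_s *m = memq, *next; m != NULL; m = next) {
                next = m->next;          /* safe even if m is freed below */
                if (m->offset >= start && m->offset < end)
                        count++;
        }
        return count;
}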
@@ -1037,79 +1045,17 @@ BYPASS_COW_COPYIN:
 }
 
 
-/*
- *     Routine:        memory_object_synchronize_completed [user interface]
- *
- *     Tell kernel that previously synchronized data
- *     (memory_object_synchronize) has been queue or placed on the
- *     backing storage.
- *
- *     Note: there may be multiple synchronize requests for a given
- *     memory object outstanding but they will not overlap.
- */
-
-kern_return_t
-memory_object_synchronize_completed(
-       memory_object_control_t control,
-       memory_object_offset_t  offset,
-       memory_object_size_t    length)
-{
-       vm_object_t                     object;
-       msync_req_t                     msr;
-
-       object = memory_object_control_to_vm_object(control);
-
-        XPR(XPR_MEMORY_OBJECT,
-           "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
-           object, offset, length, 0, 0);
-
-       /*
-        *      Look for bogus arguments
-        */
-
-       if (object == VM_OBJECT_NULL)
-               return (KERN_INVALID_ARGUMENT);
-
-       vm_object_lock(object);
-
-/*
- *     search for sync request structure
- */
-       queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
-               if (msr->offset == offset && msr->length == length) {
-                       queue_remove(&object->msr_q, msr, msync_req_t, msr_q);
-                       break;
-               }
-        }/* queue_iterate */
-
-       if (queue_end(&object->msr_q, (queue_entry_t)msr)) {
-               vm_object_unlock(object);
-               return KERN_INVALID_ARGUMENT;
-       }
-
-       msr_lock(msr);
-       vm_object_unlock(object);
-       msr->flag = VM_MSYNC_DONE;
-       msr_unlock(msr);
-       thread_wakeup((event_t) msr);
-
-       return KERN_SUCCESS;
-}/* memory_object_synchronize_completed */
-
 static kern_return_t
 vm_object_set_attributes_common(
        vm_object_t     object,
        boolean_t       may_cache,
-       memory_object_copy_strategy_t copy_strategy,
-       boolean_t       temporary,
-       __unused boolean_t      silent_overwrite,
-       boolean_t       advisory_pageout)
+       memory_object_copy_strategy_t copy_strategy)
 {
        boolean_t       object_became_ready;
 
         XPR(XPR_MEMORY_OBJECT,
            "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
-           object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);
+           object, (may_cache&1), copy_strategy, 0, 0);
 
        if (object == VM_OBJECT_NULL)
                return(KERN_INVALID_ARGUMENT);
@@ -1128,8 +1074,6 @@ vm_object_set_attributes_common(
 
        if (may_cache)
                may_cache = TRUE;
-       if (temporary)
-               temporary = TRUE;
 
        vm_object_lock(object);
 
@@ -1140,9 +1084,6 @@ vm_object_set_attributes_common(
        object_became_ready = !object->pager_ready;
        object->copy_strategy = copy_strategy;
        object->can_persist = may_cache;
-       object->temporary = temporary;
-//     object->silent_overwrite = silent_overwrite;
-       object->advisory_pageout = advisory_pageout;
 
        /*
         *      Wake up anyone waiting for the ready attribute
@@ -1159,6 +1100,18 @@ vm_object_set_attributes_common(
        return(KERN_SUCCESS);
 }
 
+
+kern_return_t
+memory_object_synchronize_completed(
+                       __unused    memory_object_control_t control,
+                       __unused    memory_object_offset_t  offset,
+                       __unused    memory_object_size_t    length)
+{
+        panic("memory_object_synchronize_completed no longer supported\n");
+       return(KERN_FAILURE);
+}
+
+
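
Two retirements land together here: the msync_req wait/wakeup machinery
behind memory_object_synchronize_completed() is deleted and the routine
becomes a panicking stub, and vm_object_set_attributes_common() loses
the temporary, silent_overwrite, and advisory_pageout parameters that no
longer influence the VM. Restating the surviving helper shape from the
hunk above (types as declared in the osfmk/vm headers):

static kern_return_t
vm_object_set_attributes_common(
        vm_object_t                     object,
        boolean_t                       may_cache,
        memory_object_copy_strategy_t   copy_strategy);

/* callers shrink to, e.g.: */
kr = vm_object_set_attributes_common(object, may_cache, copy_strategy);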
 /*
  *     Set the memory object attribute as provided.
  *
@@ -1176,12 +1129,9 @@ memory_object_change_attributes(
 {
        vm_object_t                     object;
        kern_return_t                   result = KERN_SUCCESS;
-       boolean_t                       temporary;
        boolean_t                       may_cache;
        boolean_t                       invalidate;
        memory_object_copy_strategy_t   copy_strategy;
-       boolean_t                       silent_overwrite;
-       boolean_t                       advisory_pageout;
 
        object = memory_object_control_to_vm_object(control);
        if (object == VM_OBJECT_NULL)
@@ -1189,12 +1139,8 @@ memory_object_change_attributes(
 
        vm_object_lock(object);
 
-       temporary = object->temporary;
        may_cache = object->can_persist;
        copy_strategy = object->copy_strategy;
-//     silent_overwrite = object->silent_overwrite;
-       silent_overwrite = FALSE;
-       advisory_pageout = object->advisory_pageout;
 #if notyet
        invalidate = object->invalidate;
 #endif
@@ -1212,7 +1158,6 @@ memory_object_change_attributes(
 
                 behave = (old_memory_object_behave_info_t) attributes;
 
-               temporary = behave->temporary;
                invalidate = behave->invalidate;
                copy_strategy = behave->copy_strategy;
 
@@ -1230,11 +1175,8 @@ memory_object_change_attributes(
 
                 behave = (memory_object_behave_info_t) attributes;
 
-               temporary = behave->temporary;
                invalidate = behave->invalidate;
                copy_strategy = behave->copy_strategy;
-               silent_overwrite = behave->silent_overwrite;
-               advisory_pageout = behave->advisory_pageout;
                break;
            }
 
@@ -1284,7 +1226,6 @@ memory_object_change_attributes(
 
                copy_strategy = attr->copy_strategy;
                 may_cache = attr->may_cache_object;
-               temporary = attr->temporary;
 
                break;
            }
@@ -1299,9 +1240,6 @@ memory_object_change_attributes(
 
        if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
                copy_strategy = MEMORY_OBJECT_COPY_DELAY;
-               temporary = TRUE;
-       } else {
-               temporary = FALSE;
        }
 
        /*
@@ -1310,10 +1248,7 @@ memory_object_change_attributes(
         */
        return (vm_object_set_attributes_common(object,
                                                     may_cache,
-                                                    copy_strategy,
-                                                    temporary,
-                                                    silent_overwrite,
-                                                    advisory_pageout));
+                                                    copy_strategy));
 }
 
 kern_return_t
@@ -1344,7 +1279,7 @@ memory_object_get_attributes(
 
                behave = (old_memory_object_behave_info_t) attributes;
                behave->copy_strategy = object->copy_strategy;
-               behave->temporary = object->temporary;
+               behave->temporary = FALSE;
 #if notyet     /* remove when vm_msync complies and clean in place fini */
                 behave->invalidate = object->invalidate;
 #else
@@ -1366,14 +1301,13 @@ memory_object_get_attributes(
 
                 behave = (memory_object_behave_info_t) attributes;
                 behave->copy_strategy = object->copy_strategy;
-               behave->temporary = object->temporary;
+               behave->temporary = FALSE;
 #if notyet     /* remove when vm_msync complies and clean in place fini */
                 behave->invalidate = object->invalidate;
 #else
                behave->invalidate = FALSE;
 #endif
-               behave->advisory_pageout = object->advisory_pageout;
-//             behave->silent_overwrite = object->silent_overwrite;
+               behave->advisory_pageout = FALSE;
                behave->silent_overwrite = FALSE;
                 *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
                break;
@@ -1426,7 +1360,7 @@ memory_object_get_attributes(
                attr->copy_strategy = object->copy_strategy;
                attr->cluster_size = PAGE_SIZE;
                attr->may_cache_object = object->can_persist;
-               attr->temporary = object->temporary;
+               attr->temporary = FALSE;
 
                 *count = MEMORY_OBJECT_ATTR_INFO_COUNT;
                 break;
@@ -1451,7 +1385,8 @@ memory_object_iopl_request(
        upl_t                   *upl_ptr,
        upl_page_info_array_t   user_page_list,
        unsigned int            *page_list_count,
-       upl_control_flags_t     *flags)
+       upl_control_flags_t     *flags,
+       vm_tag_t                tag)
 {
        vm_object_t             object;
        kern_return_t           ret;
@@ -1504,45 +1439,9 @@ memory_object_iopl_request(
                
                named_entry_lock(named_entry);
 
-               if (named_entry->is_pager) {
-                       object = vm_object_enter(named_entry->backing.pager, 
-                                       named_entry->offset + named_entry->size, 
-                                       named_entry->internal, 
-                                       FALSE,
-                                       FALSE);
-                       if (object == VM_OBJECT_NULL) {
-                               named_entry_unlock(named_entry);
-                               return(KERN_INVALID_OBJECT);
-                       }
-
-                       /* JMM - drop reference on pager here? */
-
-                       /* create an extra reference for the named entry */
-                       vm_object_lock(object);
-                       vm_object_reference_locked(object);
-                       named_entry->backing.object = object;
-                       named_entry->is_pager = FALSE;
-                       named_entry_unlock(named_entry);
-
-                       /* wait for object to be ready */
-                       while (!object->pager_ready) {
-                               vm_object_wait(object,
-                                               VM_OBJECT_EVENT_PAGER_READY,
-                                               THREAD_UNINT);
-                               vm_object_lock(object);
-                       }
-                       vm_object_unlock(object);
-               } else {
-                       /* This is the case where we are going to map */
-                       /* an already mapped object.  If the object is */
-                       /* not ready it is internal.  An external     */
-                       /* object cannot be mapped until it is ready  */
-                       /* we can therefore avoid the ready check     */
-                       /* in this case.  */
-                       object = named_entry->backing.object;
-                       vm_object_reference(object);
-                       named_entry_unlock(named_entry);
-               }
+               object = named_entry->backing.object;
+               vm_object_reference(object);
+               named_entry_unlock(named_entry);
        } else if (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL) {
                memory_object_control_t control;
                control = (memory_object_control_t) port;
@@ -1574,7 +1473,8 @@ memory_object_iopl_request(
                                     upl_ptr,
                                     user_page_list,
                                     page_list_count,
-                                    caller_flags);
+                                    caller_flags,
+                                    tag);
        vm_object_deallocate(object);
        return ret;
 }
@@ -1596,7 +1496,8 @@ memory_object_upl_request(
        upl_t                   *upl_ptr,
        upl_page_info_array_t   user_page_list,
        unsigned int            *page_list_count,
-       int                     cntrl_flags)
+       int                     cntrl_flags,
+       int                     tag)
 {
        vm_object_t             object;
 
@@ -1610,7 +1511,8 @@ memory_object_upl_request(
                                     upl_ptr,
                                     user_page_list,
                                     page_list_count,
-                                    (upl_control_flags_t)(unsigned int) cntrl_flags);
+                                    (upl_control_flags_t)(unsigned int) cntrl_flags,
+                                    tag);
 }
 
 /*  
@@ -1633,7 +1535,8 @@ memory_object_super_upl_request(
        upl_t                   *upl,
        upl_page_info_t         *user_page_list,
        unsigned int            *page_list_count,
-       int                     cntrl_flags)
+       int                     cntrl_flags,
+       int                     tag)
 {
        vm_object_t             object;
 
@@ -1648,7 +1551,8 @@ memory_object_super_upl_request(
                                           upl,
                                           user_page_list,
                                           page_list_count,
-                                          (upl_control_flags_t)(unsigned int) cntrl_flags);
+                                          (upl_control_flags_t)(unsigned int) cntrl_flags,
+                                          tag);
 }
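
All three UPL entry points in this file -- memory_object_iopl_request(),
memory_object_upl_request(), and memory_object_super_upl_request() --
gain a trailing tag argument in this diff and pass it straight through
to the vm_object_*_request() layer, so the wired memory behind each UPL
can be attributed to a kernel allocation site. An illustrative call
(variable names hypothetical; VM_KERN_MEMORY_FILE is one of the standard
tags from <mach/vm_statistics.h>):

kr = memory_object_iopl_request(port, offset, &upl_size, &upl,
                                page_list, &page_list_count,
                                &flags, VM_KERN_MEMORY_FILE);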
 
 kern_return_t
@@ -1672,12 +1576,6 @@ memory_object_cluster_size(memory_object_control_t control, memory_object_offset
 }
 
 
-int vm_stat_discard_cleared_reply = 0;
-int vm_stat_discard_cleared_unset = 0;
-int vm_stat_discard_cleared_too_late = 0;
-
-
-
 /*
  *     Routine:        host_default_memory_manager [interface]
  *     Purpose:
@@ -1942,6 +1840,41 @@ memory_object_mark_io_tracking(
        }
 }
 
+#if CONFIG_SECLUDED_MEMORY
+void
+memory_object_mark_eligible_for_secluded(
+       memory_object_control_t control,
+       boolean_t               eligible_for_secluded)
+{
+       vm_object_t             object;
+
+       if (control == NULL)
+               return;
+       object = memory_object_control_to_vm_object(control);
+
+       if (object == VM_OBJECT_NULL) {
+               return;
+       }
+
+       vm_object_lock(object);
+       if (eligible_for_secluded &&
+           secluded_for_filecache && /* global boot-arg */
+           !object->eligible_for_secluded) {
+               object->eligible_for_secluded = TRUE;
+               vm_page_secluded.eligible_for_secluded += object->resident_page_count;
+       } else if (!eligible_for_secluded &&
+                  object->eligible_for_secluded) {
+               object->eligible_for_secluded = FALSE;
+               vm_page_secluded.eligible_for_secluded -= object->resident_page_count;
+               if (object->resident_page_count) {
+                       /* XXX FBDP TODO: flush pages from secluded queue? */
+                       // printf("FBDP TODO: flush %d pages from %p from secluded queue\n", object->resident_page_count, object);
+               }
+       }
+       vm_object_unlock(object);
+}
+#endif /* CONFIG_SECLUDED_MEMORY */
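
memory_object_mark_eligible_for_secluded() is new with
CONFIG_SECLUDED_MEMORY: it flips an object's eligibility and keeps the
global vm_page_secluded.eligible_for_secluded counter in step with the
object's resident page count, gated on the secluded_for_filecache
boot-arg. A hedged usage sketch -- the control would come from a real
pager, shown here only for shape:

#if CONFIG_SECLUDED_MEMORY
        /* opt a file-backed object's pages into the secluded pool */
        memory_object_mark_eligible_for_secluded(control, TRUE);

        /* ... later, back out; resident pages are debited in bulk */
        memory_object_mark_eligible_for_secluded(control, FALSE);
#endif /* CONFIG_SECLUDED_MEMORY */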
+
 kern_return_t
 memory_object_pages_resident(
        memory_object_control_t control,
@@ -2059,6 +1992,22 @@ memory_object_control_to_vm_object(
        return (control->moc_object);
 }
 
+__private_extern__ vm_object_t
+memory_object_to_vm_object(
+       memory_object_t mem_obj)
+{
+       memory_object_control_t mo_control;
+
+       if (mem_obj == MEMORY_OBJECT_NULL) {
+               return VM_OBJECT_NULL;
+       }
+       mo_control = mem_obj->mo_control;
+       if (mo_control == NULL) {
+               return VM_OBJECT_NULL;
+       }
+       return memory_object_control_to_vm_object(mo_control);
+}
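
The new memory_object_to_vm_object() helper completes the translation
chain pager -> control -> object, returning VM_OBJECT_NULL when either
hop is missing, so a memory_object_t can be resolved in one step.
Illustrative caller:

vm_object_t object;

object = memory_object_to_vm_object(mem_obj);
if (object == VM_OBJECT_NULL)
        return KERN_INVALID_ARGUMENT;    /* no control or no object */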
+
 memory_object_control_t
 convert_port_to_mo_control(
        __unused mach_port_t    port)
@@ -2253,6 +2202,8 @@ kern_return_t memory_object_synchronize
        vm_sync_t sync_flags
 )
 {
+        panic("memory_object_synchronize no longer supported\n");
+
        return (memory_object->mo_pager_ops->memory_object_synchronize)(
                memory_object,
                offset,
@@ -2315,19 +2266,6 @@ kern_return_t memory_object_data_reclaim
                reclaim_backing_store);
 }
 
-/* Routine memory_object_create */
-kern_return_t memory_object_create
-(
-       memory_object_default_t default_memory_manager,
-       vm_size_t new_memory_object_size,
-       memory_object_t *new_memory_object
-)
-{
-       return default_pager_memory_object_create(default_memory_manager,
-                                                 new_memory_object_size,
-                                                 new_memory_object);
-}
-
 upl_t
 convert_port_to_upl(
        ipc_port_t      port)