diff --git a/osfmk/vm/memory_object.c b/osfmk/vm/memory_object.c
index 9a35734bc82282b30c724e58beb0b46007e74dc1..e4013fd8248c433b7617d10acd65c0bb21c33410 100644
--- a/osfmk/vm/memory_object.c
+++ b/osfmk/vm/memory_object.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -84,7 +84,6 @@
  */
 #include <string.h>             /* For memcpy() */
 
-#include <kern/xpr.h>
 #include <kern/host.h>
 #include <kern/thread.h>        /* For current_thread() */
 #include <kern/ipc_mig.h>
@@ -106,7 +105,7 @@
 #include <vm/vm_protos.h>
 
 memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
-decl_lck_mtx_data(, memory_manager_default_lock)
+LCK_MTX_EARLY_DECLARE(memory_manager_default_lock, &vm_object_lck_grp);
 
 
 /*
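
The lock above is now declared with LCK_MTX_EARLY_DECLARE, which hands
initialization to the startup machinery instead of an explicit init call;
the matching memory_manager_default_init() routine is deleted further down
in this diff. A minimal user-space sketch of the early-declare idea, with
pthreads standing in for lck_mtx and all names illustrative:

    #include <pthread.h>
    #include <stddef.h>

    /* Each "early declared" lock is a static object plus a table entry
     * that a single boot-time pass initializes, making per-subsystem
     * routines like the removed memory_manager_default_init() redundant. */
    static pthread_mutex_t memory_manager_default_lock_sketch;

    static pthread_mutex_t *const early_locks[] = {
        &memory_manager_default_lock_sketch,
        /* ... one entry per early-declared lock ... */
    };

    /* Runs once, before any registered lock can be taken. */
    static void
    early_lock_pass(void)
    {
        for (size_t i = 0; i < sizeof(early_locks) / sizeof(early_locks[0]); i++) {
            pthread_mutex_init(early_locks[i], NULL);
        }
    }
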
@@ -166,11 +165,6 @@ memory_object_lock_page(
        boolean_t               should_flush,
        vm_prot_t               prot)
 {
-       XPR(XPR_MEMORY_OBJECT,
-           "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
-           m, should_return, should_flush, prot, 0);
-
-
        if (m->vmp_busy || m->vmp_cleaning) {
                return MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK;
        }
@@ -447,10 +441,6 @@ vm_object_sync(
        boolean_t       rv;
        int             flags;
 
-       XPR(XPR_VM_OBJECT,
-           "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
-           object, offset, size, should_flush, should_return);
-
        /*
         * Lock the object, and acquire a paging reference to
         * prevent the memory_object and control ports from
@@ -542,15 +532,24 @@ vm_object_update_extent(
        vm_object_offset_t      next_offset = offset;
        memory_object_lock_result_t     page_lock_result;
        memory_object_cluster_size_t    data_cnt = 0;
-       struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
-       struct vm_page_delayed_work     *dwp;
+       struct  vm_page_delayed_work    dw_array;
+       struct  vm_page_delayed_work    *dwp, *dwp_start;
+       bool            dwp_finish_ctx = TRUE;
        int             dw_count;
        int             dw_limit;
        int             dirty_count;
 
-       dwp = &dw_array[0];
+       dwp_start = dwp = NULL;
        dw_count = 0;
        dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+       dwp_start = vm_page_delayed_work_get_ctx();
+       if (dwp_start == NULL) {
+               dwp_start = &dw_array;
+               dw_limit = 1;
+               dwp_finish_ctx = FALSE;
+       }
+       dwp = dwp_start;
+
        dirty_count = 0;
 
        for (;
@@ -563,8 +562,8 @@ vm_object_update_extent(
                if (data_cnt) {
                        if ((data_cnt >= MAX_UPL_TRANSFER_BYTES) || (next_offset != offset)) {
                                if (dw_count) {
-                                       vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
-                                       dwp = &dw_array[0];
+                                       vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
+                                       dwp = dwp_start;
                                        dw_count = 0;
                                }
                                LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
@@ -582,8 +581,8 @@ vm_object_update_extent(
                                 *      End of a run of dirty/precious pages.
                                 */
                                if (dw_count) {
-                                       vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
-                                       dwp = &dw_array[0];
+                                       vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
+                                       dwp = dwp_start;
                                        dw_count = 0;
                                }
                                LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
@@ -648,8 +647,8 @@ vm_object_update_extent(
                                VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
 
                                if (dw_count >= dw_limit) {
-                                       vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
-                                       dwp = &dw_array[0];
+                                       vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
+                                       dwp = dwp_start;
                                        dw_count = 0;
                                }
                        }
@@ -665,13 +664,19 @@ vm_object_update_extent(
         *      Clean any pages that have been saved.
         */
        if (dw_count) {
-               vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+               vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
        }
 
        if (data_cnt) {
                LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
                    paging_offset, offset_resid, io_errno, should_iosync);
        }
+
+       if (dwp_start && dwp_finish_ctx) {
+               vm_page_delayed_work_finish_ctx(dwp_start);
+               dwp_start = dwp = NULL;
+       }
+
        return retval;
 }
 
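The vm_object_update_extent() hunks above replace the fixed on-stack
dw_array[DEFAULT_DELAYED_WORK_LIMIT] batch with a context obtained from
vm_page_delayed_work_get_ctx(); when that allocation fails, the code
degrades to a single stack entry (dw_limit = 1) and uses dwp_finish_ctx to
remember not to hand the stack slot to vm_page_delayed_work_finish_ctx().
A compilable user-space sketch of the same acquire/flush/release shape
(all names illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct delayed_work { int page_id; };   /* stand-in for vm_page_delayed_work */
    #define DEFAULT_LIMIT 32                /* stand-in for DEFAULT_DELAYED_WORK_LIMIT */

    static struct delayed_work *
    get_ctx(void)
    {
        return malloc(sizeof(struct delayed_work) * DEFAULT_LIMIT);
    }

    static void
    do_delayed_work(struct delayed_work *batch, int count)
    {
        printf("flushing %d deferred page ops (first id %d)\n",
            count, batch[0].page_id);
    }

    void
    update_extent_sketch(int npages)
    {
        struct delayed_work dw_array;       /* one-entry fallback, as in the diff */
        struct delayed_work *dwp, *dwp_start;
        bool dwp_finish_ctx = true;
        int dw_count = 0;
        int dw_limit = DEFAULT_LIMIT;

        dwp_start = get_ctx();
        if (dwp_start == NULL) {            /* allocation failed: fall back to   */
            dwp_start = &dw_array;          /* the stack, flushing one at a time */
            dw_limit = 1;
            dwp_finish_ctx = false;
        }
        dwp = dwp_start;

        for (int i = 0; i < npages; i++) {
            dwp->page_id = i;               /* queue deferred work for this page */
            dwp++;
            if (++dw_count >= dw_limit) {   /* batch full: flush and rewind */
                do_delayed_work(dwp_start, dw_count);
                dwp = dwp_start;
                dw_count = 0;
            }
        }
        if (dw_count) {                     /* flush the tail */
            do_delayed_work(dwp_start, dw_count);
        }
        if (dwp_finish_ctx) {               /* free only what get_ctx() returned */
            free(dwp_start);
        }
    }

Moving the batch off the stack trims a sizable kernel stack frame, while
the one-entry fallback keeps the path functional when the context cannot
be allocated.
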
@@ -767,9 +772,9 @@ vm_object_update(
                }
        }
        if ((copy_object != VM_OBJECT_NULL && update_cow) || (flags & MEMORY_OBJECT_DATA_SYNC)) {
-               vm_map_size_t           i;
-               vm_map_size_t           copy_size;
-               vm_map_offset_t         copy_offset;
+               vm_object_offset_t      i;
+               vm_object_size_t        copy_size;
+               vm_object_offset_t      copy_offset;
                vm_prot_t               prot;
                vm_page_t               page;
                vm_page_t               top_page;
@@ -781,8 +786,7 @@ vm_object_update(
                         * translate offset with respect to shadow's offset
                         */
                        copy_offset = (offset >= copy_object->vo_shadow_offset) ?
-                           (vm_map_offset_t)(offset - copy_object->vo_shadow_offset) :
-                           (vm_map_offset_t) 0;
+                           (offset - copy_object->vo_shadow_offset) : 0;
 
                        if (copy_offset > copy_object->vo_size) {
                                copy_offset = copy_object->vo_size;
@@ -794,7 +798,7 @@ vm_object_update(
                        if (offset >= copy_object->vo_shadow_offset) {
                                copy_size = size;
                        } else if (size >= copy_object->vo_shadow_offset - offset) {
-                               copy_size = size - (copy_object->vo_shadow_offset - offset);
+                               copy_size = (size - (copy_object->vo_shadow_offset - offset));
                        } else {
                                copy_size = 0;
                        }
@@ -880,7 +884,7 @@ RETRY_COW_OF_LOCK_REQUEST:
                                /* success but no VM page: fail */
                                vm_object_paging_end(copy_object);
                                vm_object_unlock(copy_object);
-                       /*FALLTHROUGH*/
+                               OS_FALLTHROUGH;
                        case VM_FAULT_MEMORY_ERROR:
                                if (object != copy_object) {
                                        vm_object_deallocate(copy_object);
@@ -1058,10 +1062,6 @@ vm_object_set_attributes_common(
 {
        boolean_t       object_became_ready;
 
-       XPR(XPR_MEMORY_OBJECT,
-           "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
-           object, (may_cache & 1), copy_strategy, 0, 0);
-
        if (object == VM_OBJECT_NULL) {
                return KERN_INVALID_ARGUMENT;
        }
@@ -1415,7 +1415,7 @@ memory_object_iopl_request(
        if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
                vm_named_entry_t        named_entry;
 
-               named_entry = (vm_named_entry_t)port->ip_kobject;
+               named_entry = (vm_named_entry_t) ip_get_kobject(port);
                /* a few checks to make sure user is obeying rules */
                if (*upl_size == 0) {
                        if (offset >= named_entry->size) {
@@ -1445,15 +1445,20 @@ memory_object_iopl_request(
                /* the callers parameter offset is defined to be the */
                /* offset from beginning of named entry offset in object */
                offset = offset + named_entry->offset;
+               offset += named_entry->data_offset;
 
                if (named_entry->is_sub_map ||
                    named_entry->is_copy) {
                        return KERN_INVALID_ARGUMENT;
                }
+               if (!named_entry->is_object) {
+                       return KERN_INVALID_ARGUMENT;
+               }
 
                named_entry_lock(named_entry);
 
-               object = named_entry->backing.object;
+               object = vm_named_entry_to_vm_object(named_entry);
+               assert(object != VM_OBJECT_NULL);
                vm_object_reference(object);
                named_entry_unlock(named_entry);
        } else if (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL) {
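
Two hardening changes meet in this function: the kobject is read through
the ip_get_kobject() accessor instead of the raw ip_kobject field (the
stored value need not be a plain pointer), and the backing VM object is
reached via vm_named_entry_to_vm_object() only after a new is_object
check, alongside the existing sub-map/copy rejection. A user-space sketch
of the accessor idea with a deliberately simple packing scheme (purely
illustrative, not the XNU representation):

    #include <assert.h>
    #include <stdint.h>

    /* Callers never touch the stored field directly, so the encoding
     * (packing, tagging, signing) can change in exactly one place. */
    struct port_sketch {
        uintptr_t ip_kobject_bits;      /* encoded, not a raw pointer */
    };

    #define KOBJ_SHIFT 4                /* pretend the low bits are repurposed */

    static void
    ip_set_kobject_sketch(struct port_sketch *port, void *kobj)
    {
        assert(((uintptr_t)kobj & ((1u << KOBJ_SHIFT) - 1)) == 0);
        port->ip_kobject_bits = (uintptr_t)kobj >> KOBJ_SHIFT;
    }

    static void *
    ip_get_kobject_sketch(struct port_sketch *port)
    {
        return (void *)(port->ip_kobject_bits << KOBJ_SHIFT);
    }
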
@@ -1517,6 +1522,8 @@ memory_object_upl_request(
        int                     tag)
 {
        vm_object_t             object;
+       vm_tag_t                vmtag = (vm_tag_t)tag;
+       assert(vmtag == tag);
 
        object = memory_object_control_to_vm_object(control);
        if (object == VM_OBJECT_NULL) {
@@ -1530,7 +1537,7 @@ memory_object_upl_request(
                   user_page_list,
                   page_list_count,
                   (upl_control_flags_t)(unsigned int) cntrl_flags,
-                  tag);
+                  vmtag);
 }
 
 /*
@@ -1557,6 +1564,8 @@ memory_object_super_upl_request(
        int                     tag)
 {
        vm_object_t             object;
+       vm_tag_t                vmtag = (vm_tag_t)tag;
+       assert(vmtag == tag);
 
        object = memory_object_control_to_vm_object(control);
        if (object == VM_OBJECT_NULL) {
@@ -1571,7 +1580,7 @@ memory_object_super_upl_request(
                   user_page_list,
                   page_list_count,
                   (upl_control_flags_t)(unsigned int) cntrl_flags,
-                  tag);
+                  vmtag);
 }
 
 kern_return_t
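
Both memory_object_upl_request() and memory_object_super_upl_request()
still take the tag as an int at the interface, but now narrow it to
vm_tag_t once, asserting that the cast drops no bits before passing it
down. The pattern in isolation, with vm_tag_t modeled as a 16-bit type
(illustrative stand-in):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t vm_tag_sketch_t;   /* narrower than the int interface type */

    static void
    upl_request_sketch(int tag)
    {
        vm_tag_sketch_t vmtag = (vm_tag_sketch_t)tag;
        assert(vmtag == tag);           /* debug builds catch truncation */

        printf("UPL request tagged %u\n", vmtag);
    }

    int
    main(void)
    {
        upl_request_sketch(42);         /* fits in 16 bits: fine */
        /* upl_request_sketch(1 << 20);    would trip the assert */
        return 0;
    }
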
@@ -1762,15 +1771,6 @@ memory_manager_default_check(void)
        }
 }
 
-__private_extern__ void
-memory_manager_default_init(void)
-{
-       memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
-       lck_mtx_init(&memory_manager_default_lock, &vm_object_lck_grp, &vm_object_lck_attr);
-}
-
-
-
 /* Allow manipulation of individual page state.  This is actually part of */
 /* the UPL regimen but takes place on the object rather than on a UPL */
 
@@ -1879,6 +1879,24 @@ memory_object_mark_io_tracking(
        }
 }
 
+void
+memory_object_mark_trusted(
+       memory_object_control_t control)
+{
+       vm_object_t             object;
+
+       if (control == NULL) {
+               return;
+       }
+       object = memory_object_control_to_vm_object(control);
+
+       if (object != VM_OBJECT_NULL) {
+               vm_object_lock(object);
+               object->pager_trusted = TRUE;
+               vm_object_unlock(object);
+       }
+}
+
 #if CONFIG_SECLUDED_MEMORY
 void
 memory_object_mark_eligible_for_secluded(
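
The new memory_object_mark_trusted() follows the shape of the neighboring
mark_* accessors: resolve the control to a VM object, take the object
lock, set one flag, unlock. The lock matters even for a single-bit store,
since pager_trusted is a bitfield sharing a word with other flags, making
the store a read-modify-write of that word. A small model (illustrative
types):

    #include <pthread.h>

    struct object_sketch {
        pthread_mutex_t lock;
        /* Neighboring flags share one word; setting either rewrites it. */
        unsigned pager_trusted:1;
        unsigned io_tracking:1;
    };

    static void
    mark_trusted_sketch(struct object_sketch *o)
    {
        pthread_mutex_lock(&o->lock);
        o->pager_trusted = 1;   /* read-modify-write of the shared word */
        pthread_mutex_unlock(&o->lock);
    }
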
@@ -1988,19 +2006,8 @@ memory_object_is_shared_cache(
        return object->object_is_shared_cache;
 }
 
-static zone_t mem_obj_control_zone;
-
-__private_extern__ void
-memory_object_control_bootstrap(void)
-{
-       int     i;
-
-       i = (vm_size_t) sizeof(struct memory_object_control);
-       mem_obj_control_zone = zinit(i, 8192 * i, 4096, "mem_obj_control");
-       zone_change(mem_obj_control_zone, Z_CALLERACCT, FALSE);
-       zone_change(mem_obj_control_zone, Z_NOENCRYPT, TRUE);
-       return;
-}
+static ZONE_DECLARE(mem_obj_control_zone, "mem_obj_control",
+    sizeof(struct memory_object_control), ZC_NOENCRYPT);
 
 __private_extern__ memory_object_control_t
 memory_object_control_allocate(
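
The zinit()/zone_change() bootstrap collapses into one compile-time
ZONE_DECLARE carrying the ZC_NOENCRYPT flag, so the hand-written
memory_object_control_bootstrap() and its sizing arithmetic go away. A
user-space model of a statically declared, self-registering zone
(constructor registration stands in for the kernel's startup processing;
names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct control_sketch {             /* stand-in for memory_object_control */
        void *moc_object;
    };

    struct zone_sketch {
        const char *name;
        size_t      elem_size;
    };

    /* Registration before main() replaces the removed *_bootstrap() call. */
    #define ZONE_DECLARE_SKETCH(var, zname, size)                       \
        static struct zone_sketch var;                                  \
        __attribute__((constructor)) static void                        \
        var##_register(void)                                            \
        {                                                               \
            var.name = zname;                                           \
            var.elem_size = size;                                       \
        }

    ZONE_DECLARE_SKETCH(mem_obj_control_zone_sketch, "mem_obj_control",
        sizeof(struct control_sketch));

    static void *
    zalloc_sketch(struct zone_sketch *z)
    {
        return malloc(z->elem_size);    /* real zones carve fixed-size chunks */
    }

    int
    main(void)
    {
        void *elem = zalloc_sketch(&mem_obj_control_zone_sketch);
        printf("one %s element, %zu bytes\n",
            mem_obj_control_zone_sketch.name,
            mem_obj_control_zone_sketch.elem_size);
        free(elem);
        return 0;
    }
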
@@ -2336,7 +2343,7 @@ convert_port_to_upl(
                ip_unlock(port);
                return (upl_t)NULL;
        }
-       upl = (upl_t) port->ip_kobject;
+       upl = (upl_t) ip_get_kobject(port);
        ip_unlock(port);
        upl_lock(upl);
        upl->ref_count += 1;