diff --git a/osfmk/vm/vm_user.c b/osfmk/vm/vm_user.c
index 7b1eb07034e0ae94775e026f09c738efe84844f6..886dbb6ffdc3fe970647bb6828577527aeaf1b90 100644
--- a/osfmk/vm/vm_user.c
+++ b/osfmk/vm/vm_user.c
@@ -98,6 +98,7 @@
 #include <mach/vm_param.h>
 #include <mach/vm_statistics.h>
 #include <mach/mach_syscalls.h>
+#include <mach/sdt.h>
 
 #include <mach/host_priv_server.h>
 #include <mach/mach_vm_server.h>
@@ -115,6 +116,7 @@
 #include <vm/vm_pageout.h>
 #include <vm/vm_protos.h>
 #include <vm/vm_purgeable_internal.h>
+#include <vm/vm_init.h>
 
 vm_size_t        upl_offset_to_pagelist = 0;
 
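Two notes on the new includes: <mach/sdt.h> supplies the DTRACE_VM* statically-defined-tracing probe macros (presumably for probe points elsewhere in this file; no probe site appears in this diff), and <vm/vm_init.h> carries the prototype for the vm_user_init() hook added at the end of this diff. For reference, the probe macros are used elsewhere in xnu along these lines (the zfod probe is from vm_fault.c, not from this change):

	/* Fires the vminfo:::zfod DTrace probe with two arguments. */
	DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
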
@@ -122,6 +124,8 @@ vm_size_t        upl_offset_to_pagelist = 0;
 #include <vm/cpm.h>
 #endif /* VM_CPM */
 
+lck_grp_t      dynamic_pager_control_port_lock_group;
+decl_lck_mtx_data(, dynamic_pager_control_port_lock);
 ipc_port_t     dynamic_pager_control_port=NULL;
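
These pair a lock group with statically allocated mutex storage for the previously unsynchronized dynamic_pager_control_port global; dp_control_port_init(), added near the end of this diff, initializes both. Every access to the port is then bracketed as in this minimal sketch of the pattern the later hunks follow:

	lck_mtx_lock(&dynamic_pager_control_port_lock);
	/* ... read or replace dynamic_pager_control_port ... */
	lck_mtx_unlock(&dynamic_pager_control_port_lock);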
 
 /*
@@ -294,7 +298,7 @@ mach_vm_deallocate(
  */
 kern_return_t
 vm_deallocate(
-       register vm_map_t       map,
+       vm_map_t                map,
        vm_offset_t             start,
        vm_size_t               size)
 {
@@ -346,7 +350,7 @@ mach_vm_inherit(
  */
 kern_return_t
 vm_inherit(
-       register vm_map_t       map,
+       vm_map_t                map,
        vm_offset_t             start,
        vm_size_t               size,
        vm_inherit_t            new_inheritance)
@@ -1163,7 +1167,7 @@ mach_vm_wire(
 kern_return_t
 vm_wire(
        host_priv_t             host_priv,
-       register vm_map_t       map,
+       vm_map_t                map,
        vm_offset_t             start,
        vm_size_t               size,
        vm_prot_t               access)
@@ -1303,6 +1307,7 @@ vm_toggle_entry_reuse(int toggle, int *old_value)
 {
        vm_map_t map = current_map();
        
+       assert(!map->is_nested_map);
        if(toggle == VM_TOGGLE_GETVALUE && old_value != NULL){
                *old_value = map->disable_vmentry_reuse;
        } else if(toggle == VM_TOGGLE_SET){
@@ -1341,21 +1346,37 @@ kern_return_t
 mach_vm_behavior_set(
        vm_map_t                map,
        mach_vm_offset_t        start,
-       mach_vm_size_t  size,
+       mach_vm_size_t          size,
        vm_behavior_t           new_behavior)
 {
+       vm_map_offset_t align_mask;
+
        if ((map == VM_MAP_NULL) || (start + size < start))
                return(KERN_INVALID_ARGUMENT);
 
        if (size == 0)
                return KERN_SUCCESS;
 
-       return(vm_map_behavior_set(map,
-                                  vm_map_trunc_page(start,
-                                                    VM_MAP_PAGE_MASK(map)), 
-                                  vm_map_round_page(start+size,
-                                                    VM_MAP_PAGE_MASK(map)),
-                                  new_behavior));
+       switch (new_behavior) {
+       case VM_BEHAVIOR_REUSABLE:
+       case VM_BEHAVIOR_REUSE:
+       case VM_BEHAVIOR_CAN_REUSE:
+               /*
+                * Align to the hardware page size, to allow
+                * malloc() to maximize the amount of re-usability,
+                * even on systems with larger software page size.
+                */
+               align_mask = PAGE_MASK;
+               break;
+       default:
+               align_mask = VM_MAP_PAGE_MASK(map);
+               break;
+       }
+
+       return vm_map_behavior_set(map,
+                                  vm_map_trunc_page(start, align_mask),
+                                  vm_map_round_page(start+size, align_mask),
+                                  new_behavior);
 }
 
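A worked example of what the finer mask buys (illustrative numbers, not from the diff): with 16K map pages over 4K hardware pages, a VM_BEHAVIOR_REUSABLE hint on [0x6000, 0xb000) is no longer widened to 16K boundaries, so malloc can hint at hardware-page granularity:

	/* 4K hardware mask (PAGE_MASK == 0xfff): range kept exactly. */
	vm_map_trunc_page(0x6000, 0xfff);	/* 0x6000 */
	vm_map_round_page(0xb000, 0xfff);	/* 0xb000 */

	/* 16K map mask (VM_MAP_PAGE_MASK(map) == 0x3fff): range widens. */
	vm_map_trunc_page(0x6000, 0x3fff);	/* 0x4000 */
	vm_map_round_page(0xb000, 0x3fff);	/* 0xc000 */
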
 /*
@@ -1378,18 +1399,13 @@ vm_behavior_set(
        vm_size_t               size,
        vm_behavior_t           new_behavior)
 {
-       if ((map == VM_MAP_NULL) || (start + size < start))
-               return(KERN_INVALID_ARGUMENT);
-
-       if (size == 0)
-               return KERN_SUCCESS;
+       if (start + size < start)
+               return KERN_INVALID_ARGUMENT;
 
-       return(vm_map_behavior_set(map,
-                                  vm_map_trunc_page(start,
-                                                    VM_MAP_PAGE_MASK(map)), 
-                                  vm_map_round_page(start+size,
-                                                    VM_MAP_PAGE_MASK(map)),
-                                  new_behavior));
+       return mach_vm_behavior_set(map,
+                                   (mach_vm_offset_t) start,
+                                   (mach_vm_size_t) size,
+                                   new_behavior);
 }
 
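vm_behavior_set() now keeps only the overflow check and funnels everything else through mach_vm_behavior_set(), so the NULL-map, zero-size, and alignment logic lives in exactly one place. A user-space caller is unaffected; sketch (buf and len are placeholders):

	kern_return_t kr;

	kr = vm_behavior_set(mach_task_self(),
			     (vm_address_t)buf,
			     (vm_size_t)len,
			     VM_BEHAVIOR_REUSABLE);
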
 /*
@@ -1851,6 +1867,7 @@ vm_map_get_upl(
        return kr;
 }
 
+
 /*
  * mach_make_memory_entry_64
  *
@@ -1913,6 +1930,7 @@ mach_make_memory_entry_64(
        if (((permission & 0x00FF0000) &
             ~(MAP_MEM_ONLY |
               MAP_MEM_NAMED_CREATE |
+              MAP_MEM_GRAB_SECLUDED | /* XXX FBDP TODO: restrict usage? */
               MAP_MEM_PURGABLE | 
               MAP_MEM_NAMED_REUSE |
               MAP_MEM_USE_DATA_ADDR |
@@ -2044,6 +2062,25 @@ mach_make_memory_entry_64(
                        vm_object_unlock(object);
                }
 
+#if CONFIG_SECLUDED_MEMORY
+               if (secluded_for_iokit && /* global boot-arg */
+                   ((permission & MAP_MEM_GRAB_SECLUDED)
+#if 11
+                    /* XXX FBDP for my testing only */
+                    || (secluded_for_fbdp && map_size == 97550336)
+#endif
+                           )) {
+#if 11
+                       if (!(permission & MAP_MEM_GRAB_SECLUDED) &&
+                           secluded_for_fbdp) {
+                               printf("FBDP: object %p size %lld can grab secluded\n", object, (uint64_t) map_size);
+                       }
+#endif
+                       object->can_grab_secluded = TRUE;
+                       assert(!object->eligible_for_secluded);
+               }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
                /*
                 * The VM object is brand new and nobody else knows about it,
                 * so we don't need to lock it.
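
For context: secluded_for_iokit and secluded_for_fbdp are globals controlled by boot arguments on CONFIG_SECLUDED_MEMORY kernels, the hard-coded 97550336-byte size is a developer test filter (per the XXX FBDP comments), and `#if 11` is an always-true toggle left in for easy disabling. A sketch of how such flags are typically populated at startup (the parsing site is an assumption; it is not part of this diff):

	/* Assumed pattern: read the boot-arg once during bootstrap. */
	PE_parse_boot_argn("secluded_for_iokit",
			   &secluded_for_iokit,
			   sizeof(secluded_for_iokit));
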
@@ -2114,11 +2151,11 @@ mach_make_memory_entry_64(
                        offset_in_page = 0;
                }
 
-               kr = vm_map_copyin(target_map,
-                                  map_start,
-                                  map_size,
-                                  FALSE,
-                                  &copy);
+               kr = vm_map_copyin_internal(target_map,
+                                           map_start,
+                                           map_size,
+                                           VM_MAP_COPYIN_ENTRY_LIST,
+                                           &copy);
                if (kr != KERN_SUCCESS) {
                        return kr;
                }
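
vm_map_copyin() is a wrapper that reaches vm_map_copyin_internal() with no flags set, so the visible change here is the VM_MAP_COPYIN_ENTRY_LIST flag: it forces the copy object to carry a real entry list even for sizes small enough for the kernel-buffer fast path (a reading based on the wrapper in vm_map.h/vm_map.c; the dropped FALSE corresponds to leaving VM_MAP_COPYIN_SRC_DESTROY unset):

	/* Rough equivalence, per the wrapper: */
	vm_map_copyin(map, addr, size, FALSE, &copy);
	/* ... behaves like ... */
	vm_map_copyin_internal(map, addr, size, 0, &copy);
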
@@ -2163,6 +2200,7 @@ mach_make_memory_entry_64(
                        offset_in_page = 0;
                }
 
+               cur_prot = VM_PROT_ALL;
                kr = vm_map_copy_extract(target_map,
                                         map_start,
                                         map_size,
@@ -2263,8 +2301,9 @@ redo_lookup:
                         */
                        protections &= prot;
                }
+
                if (((prot & protections) != protections) 
-                                       || (object == kernel_object)) {
+                   || (object == kernel_object)) {
                        kr = KERN_INVALID_RIGHT;
                        vm_object_unlock(object);
                        vm_map_unlock_read(target_map);
@@ -2616,6 +2655,7 @@ redo_lookup:
                }
 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
 
+               vm_object_lock_assert_exclusive(object);
                object->true_share = TRUE;
                if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
                        object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
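
The new assert documents and enforces the locking contract at the point where the object is mutated: on DEBUG/DEVELOPMENT kernels it panics if the lock is held shared or not at all, and it compiles away on release kernels. A sketch of the contract it encodes:

	vm_object_lock(object);				/* exclusive acquire */
	vm_object_lock_assert_exclusive(object);	/* passes */
	object->true_share = TRUE;			/* safe to mutate */
	vm_object_unlock(object);
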
@@ -2797,8 +2837,8 @@ redo_lookup:
                   assert(object != VM_OBJECT_NULL);
                   user_entry->backing.object = object;
                   /* we now point to this object, hold on */
-                  vm_object_reference(object); 
                   vm_object_lock(object);
+                  vm_object_reference_locked(object); 
 #if VM_OBJECT_TRACKING_OP_TRUESHARE
                if (!object->true_share &&
                    vm_object_tracking_inited) {
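
Swapping the order lets the reference be taken under the lock the code needs anyway: vm_object_reference() internally locks, bumps the count, and unlocks, so referencing first and then locking costs an extra lock/unlock cycle, while vm_object_reference_locked() just bumps the count under the already-held lock:

	vm_object_lock(object);
	vm_object_reference_locked(object);	/* count bumped under the lock */
	/* ... update object fields ... */
	vm_object_unlock(object);
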
@@ -3290,19 +3330,30 @@ mach_memory_entry_range_op(
        return kr;
 }
 
+static void dp_control_port_init(void)
+{
+       lck_grp_init(&dynamic_pager_control_port_lock_group,"dp_control_port", LCK_GRP_ATTR_NULL);
+       lck_mtx_init(&dynamic_pager_control_port_lock, &dynamic_pager_control_port_lock_group, LCK_ATTR_NULL);
+}
 
 kern_return_t
 set_dp_control_port(
        host_priv_t     host_priv,
        ipc_port_t      control_port)   
 {
-        if (host_priv == HOST_PRIV_NULL)
-                return (KERN_INVALID_HOST);
+       ipc_port_t old_port;
 
-       if (IP_VALID(dynamic_pager_control_port))
-               ipc_port_release_send(dynamic_pager_control_port);
+       if (host_priv == HOST_PRIV_NULL)
+                return (KERN_INVALID_HOST);
 
+       lck_mtx_lock(&dynamic_pager_control_port_lock);
+       old_port = dynamic_pager_control_port;
        dynamic_pager_control_port = control_port;
+       lck_mtx_unlock(&dynamic_pager_control_port_lock);
+
+       if (IP_VALID(old_port))
+               ipc_port_release_send(old_port);
+
        return KERN_SUCCESS;
 }
 
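The rewritten setter publishes the new port under the mutex and defers ipc_port_release_send() until after the unlock, so the IPC layer is never entered with the mutex held. It also closes the race in the old code, where two concurrent setters could both read and then both release the same old port:

	/* Old code, two racing callers:
	 *   A: sees IP_VALID(dynamic_pager_control_port)
	 *   B: sees IP_VALID(dynamic_pager_control_port)	<- same port
	 *   A: ipc_port_release_send(port)
	 *   B: ipc_port_release_send(port)			<- over-release
	 */
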
@@ -3311,10 +3362,13 @@ get_dp_control_port(
        host_priv_t     host_priv,
        ipc_port_t      *control_port)  
 {
-        if (host_priv == HOST_PRIV_NULL)
+       if (host_priv == HOST_PRIV_NULL)
                 return (KERN_INVALID_HOST);
 
+       lck_mtx_lock(&dynamic_pager_control_port_lock);
        *control_port = ipc_port_copy_send(dynamic_pager_control_port);
+       lck_mtx_unlock(&dynamic_pager_control_port_lock);
+
        return KERN_SUCCESS;
        
 }
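
The getter takes the same mutex, so it can no longer copy a send right from a port that a concurrent setter is halfway through replacing. ipc_port_copy_send() hands the caller an independent send right that must eventually be dropped; usage sketch:

	ipc_port_t port;

	if (get_dp_control_port(host_priv, &port) == KERN_SUCCESS &&
	    IP_VALID(port)) {
		/* ... use the port ... */
		ipc_port_release_send(port);	/* drop the copied right */
	}
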
@@ -3562,7 +3616,7 @@ vm_map_get_phys_page(
                                        break;
                                }
                        } else {
-                               phys_page = (ppnum_t)(dst_page->phys_page);
+                               phys_page = (ppnum_t)(VM_PAGE_GET_PHYS_PAGE(dst_page));
                                vm_object_unlock(object);
                                break;
                        }
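
The page's fields are no longer read directly: VM_PAGE_GET_PHYS_PAGE() recovers the physical page number from the vm_page structure, in line with the wider change in this release that puts packed vm_page fields behind accessors:

	ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(dst_page);	/* was: dst_page->phys_page */
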
@@ -3575,6 +3629,11 @@ vm_map_get_phys_page(
        return phys_page;
 }
 
+void
+vm_user_init(void)
+{
+       dp_control_port_init();
+}
 
 #if 0
 kern_return_t kernel_object_iopl_request(      /* forward */
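
vm_user_init() gives this file a startup hook so the mutex is initialized before any set_dp_control_port()/get_dp_control_port() caller can run; the <vm/vm_init.h> include added at the top of the diff carries its prototype. A sketch of the intended call site (the exact bootstrap function is an assumption; VM startup lives in vm_init.c):

	/* During VM bootstrap, before the host control port is reachable: */
	vm_user_init();		/* sets up dynamic_pager_control_port_lock */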