apple/xnu (xnu-2050.22.13) - osfmk/vm/vm_user.c

diff --git a/osfmk/vm/vm_user.c b/osfmk/vm/vm_user.c
index de18c16a1f3e14d0afddcda7f88e17a61ccbe8a8..05b51b4b20c04126bf13e8001b8df9fc279f80aa 100644
--- a/osfmk/vm/vm_user.c
+++ b/osfmk/vm/vm_user.c
@@ -881,12 +881,17 @@ mach_vm_map(
        vm_prot_t               max_protection,
        vm_inherit_t            inheritance)
 {
+       kern_return_t           kr;
+       vm_map_offset_t         vmmaddr;
+
+       vmmaddr = (vm_map_offset_t) *address;
+
        /* filter out any kernel-only flags */
        if (flags & ~VM_FLAGS_USER_MAP)
                return KERN_INVALID_ARGUMENT;
 
-       return vm_map_enter_mem_object(target_map,
-                                      address,
+       kr = vm_map_enter_mem_object(target_map,
+                                      &vmmaddr,
                                       initial_size,
                                       mask,
                                       flags,
@@ -896,6 +901,9 @@ mach_vm_map(
                                       cur_protection,
                                       max_protection,
                                       inheritance);
+
+       *address = vmmaddr;
+       return kr;
 }
 
 
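The two hunks above change mach_vm_map() to stage the caller's in/out address in a local vm_map_offset_t, pass that local to vm_map_enter_mem_object(), and copy the result back into *address before returning, rather than handing the mach_vm_address_t pointer straight through (presumably to keep the user-visible address type distinct from the kernel's vm_map_offset_t). For context, the sketch below is a hypothetical user-space caller of mach_vm_map(), not part of this diff, showing how the in/out address argument behaves: zero on input with VM_FLAGS_ANYWHERE lets the kernel choose the address, which is written back on success.

    /* Illustration only, not from this diff: map anonymous memory with
     * mach_vm_map() and read back the kernel-chosen address. */
    #include <mach/mach.h>
    #include <mach/mach_vm.h>

    static kern_return_t
    map_anonymous_region(mach_vm_size_t size, mach_vm_address_t *addr_out)
    {
            mach_vm_address_t address = 0;          /* in/out parameter */
            kern_return_t kr;

            kr = mach_vm_map(mach_task_self(),
                             &address,
                             size,
                             0,                     /* mask: no alignment constraint */
                             VM_FLAGS_ANYWHERE,     /* let the kernel pick the address */
                             MACH_PORT_NULL,        /* no memory entry: anonymous memory */
                             0,                     /* offset */
                             FALSE,                 /* copy */
                             VM_PROT_READ | VM_PROT_WRITE,
                             VM_PROT_READ | VM_PROT_WRITE,
                             VM_INHERIT_DEFAULT);
            if (kr == KERN_SUCCESS)
                    *addr_out = address;
            return kr;
    }
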
@@ -1833,6 +1841,8 @@ mach_make_memory_entry_64(
        vm_prot_t               original_protections, mask_protections;
        unsigned int            wimg_mode;
 
+       boolean_t               force_shadow = FALSE;
+
        if (((permission & 0x00FF0000) &
             ~(MAP_MEM_ONLY |
               MAP_MEM_NAMED_CREATE |
@@ -1885,6 +1895,9 @@ mach_make_memory_entry_64(
                } else if (access == MAP_MEM_COPYBACK) {
                   SET_MAP_MEM(access, parent_entry->protection);
                   wimg_mode = VM_WIMG_USE_DEFAULT;
+               } else if (access == MAP_MEM_INNERWBACK) {
+                  SET_MAP_MEM(access, parent_entry->protection);
+                  wimg_mode = VM_WIMG_INNERWBACK;
                } else if (access == MAP_MEM_WTHRU) {
                   SET_MAP_MEM(access, parent_entry->protection);
                   wimg_mode = VM_WIMG_WTHRU;
@@ -1949,6 +1962,8 @@ mach_make_memory_entry_64(
                        wimg_mode = VM_WIMG_IO;
                } else if (access == MAP_MEM_COPYBACK) {
                        wimg_mode = VM_WIMG_USE_DEFAULT;
+               } else if (access == MAP_MEM_INNERWBACK) {
+                       wimg_mode = VM_WIMG_INNERWBACK;
                } else if (access == MAP_MEM_WTHRU) {
                        wimg_mode = VM_WIMG_WTHRU;
                } else if (access == MAP_MEM_WCOMB) {
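These hunks (together with the matching one later in the lookup path) teach mach_make_memory_entry_64() to accept MAP_MEM_INNERWBACK as a cache-attribute value and map it to VM_WIMG_INNERWBACK, alongside the existing IO/COPYBACK/WTHRU/WCOMB modes. The cache mode travels in the high byte of the permission argument and is read with GET_MAP_MEM() and written with SET_MAP_MEM(). As a hypothetical illustration, not part of the diff, and assuming the MAP_MEM_* constants and SET_MAP_MEM() macro from osfmk/mach/vm_statistics.h are visible to the caller, a named entry with an explicit cache mode could be requested like this:

    /* Illustration only: pack a cache mode such as MAP_MEM_INNERWBACK into
     * the permission word before creating a named memory entry.  Assumes the
     * MAP_MEM_* constants and SET_MAP_MEM() macro are available to the caller. */
    #include <mach/mach.h>
    #include <mach/vm_statistics.h>

    static kern_return_t
    make_cached_entry(vm_map_t map, memory_object_size_t *size,
                      memory_object_offset_t offset, unsigned int cache_mode,
                      mach_port_t *handle)
    {
            vm_prot_t permission = VM_PROT_READ | VM_PROT_WRITE;

            SET_MAP_MEM(cache_mode, permission);    /* cache mode lives in the top byte */

            return mach_make_memory_entry_64(map, size, offset, permission,
                                             handle, MACH_PORT_NULL);
    }
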
@@ -2154,6 +2169,10 @@ redo_lookup:
                                                 */
                                                protections &= next_entry->max_protection;
                                        }
+                                       if ((next_entry->wired_count) &&
+                                           (map_entry->wired_count == 0)) {
+                                               break;
+                                       }
                                        if(((next_entry->max_protection) 
                                                & protections) != protections) {
                                                break;
@@ -2173,6 +2192,35 @@ redo_lookup:
                        }
                }
 
+#if !CONFIG_EMBEDDED
+               if (vm_map_entry_should_cow_for_true_share(map_entry) &&
+                   object->vo_size > map_size &&
+                   map_size != 0) {
+                       /*
+                        * Set up the targeted range for copy-on-write to
+                        * limit the impact of "true_share"/"copy_delay" to
+                        * that range instead of the entire VM object...
+                        */
+                       
+                       vm_object_unlock(object);
+                       if (vm_map_lock_read_to_write(target_map)) {
+                               vm_object_deallocate(object);
+                               target_map = original_map;
+                               goto redo_lookup;
+                       }
+
+                       vm_map_clip_start(target_map, map_entry, vm_map_trunc_page(offset));
+                       vm_map_clip_end(target_map, map_entry, vm_map_round_page(offset) + map_size);
+                       force_shadow = TRUE;
+
+                       map_size = map_entry->vme_end - map_entry->vme_start;
+                       total_size = map_size;
+
+                       vm_map_lock_write_to_read(target_map);
+                       vm_object_lock(object);
+               }
+#endif /* !CONFIG_EMBEDDED */
+
                if(object->internal) {
                        /* vm_map_lookup_locked will create a shadow if   */
                        /* needs_copy is set but does not check for the   */
@@ -2180,9 +2228,11 @@ redo_lookup:
                        /* set up an object which will not be pulled from */
                        /* under us.  */
 
-                       if ((map_entry->needs_copy  || object->shadowed ||
-                            (object->vo_size > total_size))
-                                       && !object->true_share) {
+                       if (force_shadow ||
+                           ((map_entry->needs_copy  ||
+                             object->shadowed ||
+                             (object->vo_size > total_size)) &&
+                            !object->true_share)) {
                                /*
                                 * We have to unlock the VM object before
                                 * trying to upgrade the VM map lock, to
@@ -2231,7 +2281,7 @@ redo_lookup:
                                        object, map_entry->offset,
                                        total_size,
                                        ((map_entry->is_shared 
-                                               || target_map->mapped)
+                                         || target_map->mapped_in_other_pmaps)
                                                        ? PMAP_NULL :
                                                        target_map->pmap),
                                        map_entry->vme_start,
@@ -2243,6 +2293,9 @@ redo_lookup:
 
                                vm_object_lock(shadow_object);
                                while (total_size) {
+                                   assert((next_entry->wired_count == 0) ||
+                                          (map_entry->wired_count));
+
                                   if(next_entry->object.vm_object == object) {
                                        vm_object_reference_locked(shadow_object);
                                        next_entry->object.vm_object 
@@ -2294,6 +2347,8 @@ redo_lookup:
                                wimg_mode = VM_WIMG_IO;
                        } else if (access == MAP_MEM_COPYBACK) {
                                wimg_mode = VM_WIMG_USE_DEFAULT;
+                       } else if (access == MAP_MEM_INNERWBACK) {
+                               wimg_mode = VM_WIMG_INNERWBACK;
                        } else if (access == MAP_MEM_WTHRU) {
                                wimg_mode = VM_WIMG_WTHRU;
                        } else if (access == MAP_MEM_WCOMB) {
@@ -2735,8 +2790,10 @@ mach_destroy_memory_entry(
        assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
 #endif /* MACH_ASSERT */
        named_entry = (vm_named_entry_t)port->ip_kobject;
-       lck_mtx_lock(&(named_entry)->Lock);
+
+       named_entry_lock(named_entry);
        named_entry->ref_count -= 1;
+
        if(named_entry->ref_count == 0) {
                if (named_entry->is_sub_map) {
                        vm_map_deallocate(named_entry->backing.map);
@@ -2745,12 +2802,13 @@ mach_destroy_memory_entry(
                        vm_object_deallocate(named_entry->backing.object);
                } /* else JMM - need to drop reference on pager in that case */
 
-               lck_mtx_unlock(&(named_entry)->Lock);
+               named_entry_unlock(named_entry);
+               named_entry_lock_destroy(named_entry);
 
                kfree((void *) port->ip_kobject,
                      sizeof (struct vm_named_entry));
        } else
-               lck_mtx_unlock(&(named_entry)->Lock);
+               named_entry_unlock(named_entry);
 }
 
 /* Allow manipulation of individual page state.  This is actually part of */
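
The final two hunks switch mach_destroy_memory_entry() from raw lck_mtx_lock()/lck_mtx_unlock() calls on the entry's Lock to the named_entry_lock()/named_entry_unlock() wrappers, and add named_entry_lock_destroy() so the mutex is torn down before the vm_named_entry is kfree()d. The wrapper definitions are not part of this diff; presumably they are thin macros along the lines of the sketch below (an assumption, shown only for orientation).

    /* Assumed shape of the wrappers (not shown in this diff); they cover the
     * same lck_mtx_* operations the old code performed directly on
     * named_entry->Lock.  The lock group used in lck_mtx_destroy() is an
     * assumption as well. */
    #define named_entry_lock(entry)          lck_mtx_lock(&(entry)->Lock)
    #define named_entry_unlock(entry)        lck_mtx_unlock(&(entry)->Lock)
    #define named_entry_lock_destroy(entry)  lck_mtx_destroy(&(entry)->Lock, &vm_object_lck_grp)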