diff --git a/osfmk/vm/vm_user.c b/osfmk/vm/vm_user.c
index 8271d71b2c1aa9499cbb151103c9aba7bb2fb6ee..024140fb53562e8137fb76b73c08ee3894973366 100644
--- a/osfmk/vm/vm_user.c
+++ b/osfmk/vm/vm_user.c
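This revision (xnu-2782.40.9) threads each map's own page mask, VM_MAP_PAGE_MASK(map), through the user-facing VM entry points instead of assuming the kernel's global page size; teaches mach_make_memory_entry_64() about copy-backed named entries (MAP_MEM_VM_COPY, MAP_MEM_VM_SHARE) and sub-page data offsets (MAP_MEM_USE_DATA_ADDR); charges newly created purgeable objects to the creating task; and adds mach_memory_entry_get_page_counts() for named-entry introspection.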
 #include <vm/memory_object.h>
 #include <vm/vm_pageout.h>
 #include <vm/vm_protos.h>
+#include <vm/vm_purgeable_internal.h>
 
 vm_size_t        upl_offset_to_pagelist = 0;
 
@@ -164,10 +165,12 @@ mach_vm_allocate(
                 */
                map_addr = vm_map_min(map);
                if (map_addr == 0)
-                       map_addr += PAGE_SIZE;
+                       map_addr += VM_MAP_PAGE_SIZE(map);
        } else
-               map_addr = vm_map_trunc_page(*addr);
-       map_size = vm_map_round_page(size);
+               map_addr = vm_map_trunc_page(*addr,
+                                            VM_MAP_PAGE_MASK(map));
+       map_size = vm_map_round_page(size,
+                                    VM_MAP_PAGE_MASK(map));
        if (map_size == 0) {
          return(KERN_INVALID_ARGUMENT);
        }
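The change above is the pattern repeated throughout this diff: vm_map_trunc_page() and vm_map_round_page() now take an explicit page mask, VM_MAP_PAGE_MASK(map), so each map rounds to its own page size rather than the kernel's native PAGE_SIZE (a submap or shared region may use a different granule). A minimal standalone sketch of the two-argument arithmetic, assuming the mask is page_size - 1 as the real macros in osfmk/vm/vm_map.h expect:

    #include <assert.h>
    #include <stdint.h>

    /* Sketch only: the real macros pull the mask out of the vm_map. */
    static uint64_t trunc_to(uint64_t addr, uint64_t mask) {
            return addr & ~mask;              /* round down to page boundary */
    }
    static uint64_t round_to(uint64_t size, uint64_t mask) {
            return (size + mask) & ~mask;     /* round up to page boundary */
    }

    int main(void) {
            assert(trunc_to(0x5123, 0xFFF)  == 0x5000);   /* 4K-page map  */
            assert(round_to(0x5123, 0x3FFF) == 0x8000);   /* 16K-page map */
            return 0;
    }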
@@ -231,10 +234,12 @@ vm_allocate(
                 */
                map_addr = vm_map_min(map);
                if (map_addr == 0)
-                       map_addr += PAGE_SIZE;
+                       map_addr += VM_MAP_PAGE_SIZE(map);
        } else
-               map_addr = vm_map_trunc_page(*addr);
-       map_size = vm_map_round_page(size);
+               map_addr = vm_map_trunc_page(*addr,
+                                            VM_MAP_PAGE_MASK(map));
+       map_size = vm_map_round_page(size,
+                                    VM_MAP_PAGE_MASK(map));
        if (map_size == 0) {
          return(KERN_INVALID_ARGUMENT);
        }
@@ -273,8 +278,12 @@ mach_vm_deallocate(
        if (size == (mach_vm_offset_t) 0)
                return(KERN_SUCCESS);
 
-       return(vm_map_remove(map, vm_map_trunc_page(start),
-                            vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
+       return(vm_map_remove(map,
+                            vm_map_trunc_page(start,
+                                              VM_MAP_PAGE_MASK(map)),
+                            vm_map_round_page(start+size,
+                                              VM_MAP_PAGE_MASK(map)),
+                            VM_MAP_NO_FLAGS));
 }
 
 /*
@@ -295,8 +304,12 @@ vm_deallocate(
        if (size == (vm_offset_t) 0)
                return(KERN_SUCCESS);
 
-       return(vm_map_remove(map, vm_map_trunc_page(start),
-                            vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
+       return(vm_map_remove(map,
+                            vm_map_trunc_page(start,
+                                              VM_MAP_PAGE_MASK(map)),
+                            vm_map_round_page(start+size,
+                                              VM_MAP_PAGE_MASK(map)),
+                            VM_MAP_NO_FLAGS));
 }
 
 /*
@@ -319,8 +332,10 @@ mach_vm_inherit(
                return KERN_SUCCESS;
 
        return(vm_map_inherit(map,
-                             vm_map_trunc_page(start),
-                             vm_map_round_page(start+size),
+                             vm_map_trunc_page(start,
+                                               VM_MAP_PAGE_MASK(map)),
+                             vm_map_round_page(start+size,
+                                               VM_MAP_PAGE_MASK(map)),
                              new_inheritance));
 }
 
@@ -344,8 +359,10 @@ vm_inherit(
                return KERN_SUCCESS;
 
        return(vm_map_inherit(map,
-                             vm_map_trunc_page(start),
-                             vm_map_round_page(start+size),
+                             vm_map_trunc_page(start,
+                                               VM_MAP_PAGE_MASK(map)),
+                             vm_map_round_page(start+size,
+                                               VM_MAP_PAGE_MASK(map)),
                              new_inheritance));
 }
 
@@ -371,8 +388,10 @@ mach_vm_protect(
                return KERN_SUCCESS;
 
        return(vm_map_protect(map,
-                             vm_map_trunc_page(start),
-                             vm_map_round_page(start+size),
+                             vm_map_trunc_page(start,
+                                               VM_MAP_PAGE_MASK(map)),
+                             vm_map_round_page(start+size,
+                                               VM_MAP_PAGE_MASK(map)),
                              new_protection,
                              set_maximum));
 }
@@ -400,8 +419,10 @@ vm_protect(
                return KERN_SUCCESS;
 
        return(vm_map_protect(map,
-                             vm_map_trunc_page(start),
-                             vm_map_round_page(start+size),
+                             vm_map_trunc_page(start,
+                                               VM_MAP_PAGE_MASK(map)),
+                             vm_map_round_page(start+size,
+                                               VM_MAP_PAGE_MASK(map)),
                              new_protection,
                              set_maximum));
 }
@@ -425,11 +446,14 @@ mach_vm_machine_attribute(
        if (size == 0)
                return KERN_SUCCESS;
 
-       return vm_map_machine_attribute(map, 
-                               vm_map_trunc_page(addr),
-                               vm_map_round_page(addr+size),
-                               attribute,
-                               value);
+       return vm_map_machine_attribute(
+               map, 
+               vm_map_trunc_page(addr,
+                                 VM_MAP_PAGE_MASK(map)),
+               vm_map_round_page(addr+size,
+                                 VM_MAP_PAGE_MASK(map)),
+               attribute,
+               value);
 }
 
 /*
@@ -452,11 +476,14 @@ vm_machine_attribute(
        if (size == 0)
                return KERN_SUCCESS;
 
-       return vm_map_machine_attribute(map, 
-                               vm_map_trunc_page(addr),
-                               vm_map_round_page(addr+size),
-                               attribute,
-                               value);
+       return vm_map_machine_attribute(
+               map, 
+               vm_map_trunc_page(addr,
+                                 VM_MAP_PAGE_MASK(map)),
+               vm_map_round_page(addr+size,
+                                 VM_MAP_PAGE_MASK(map)),
+               attribute,
+               value);
 }
 
 /*
@@ -881,12 +908,17 @@ mach_vm_map(
        vm_prot_t               max_protection,
        vm_inherit_t            inheritance)
 {
+       kern_return_t           kr;
+       vm_map_offset_t         vmmaddr;
+
+       vmmaddr = (vm_map_offset_t) *address;
+
        /* filter out any kernel-only flags */
        if (flags & ~VM_FLAGS_USER_MAP)
                return KERN_INVALID_ARGUMENT;
 
-       return vm_map_enter_mem_object(target_map,
-                                      address,
+       kr = vm_map_enter_mem_object(target_map,
+                                      &vmmaddr,
                                       initial_size,
                                       mask,
                                       flags,
@@ -896,6 +928,9 @@ mach_vm_map(
                                       cur_protection,
                                       max_protection,
                                       inheritance);
+
+       *address = vmmaddr;
+       return kr;
 }
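mach_vm_map() now round-trips the caller's address through a local vm_map_offset_t and writes the result back exactly once, rather than handing the MIG-supplied mach_vm_offset_t pointer straight to vm_map_enter_mem_object(); presumably this keeps the internal map-offset type distinct from the user-visible out parameter. User-visible behavior is unchanged; a minimal user-space sketch, assuming <mach/mach.h> and <mach/mach_vm.h>:

    #include <mach/mach.h>
    #include <mach/mach_vm.h>
    #include <stdio.h>

    int main(void) {
            mach_vm_address_t addr = 0;
            kern_return_t kr;

            /* Anonymous mapping; MACH_PORT_NULL stands in for "no memory object". */
            kr = mach_vm_map(mach_task_self(), &addr, vm_page_size, 0,
                             VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, FALSE,
                             VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
            if (kr == KERN_SUCCESS)
                    printf("mapped one page at 0x%llx\n", (unsigned long long)addr);
            return 0;
    }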
 
 
@@ -1099,11 +1134,20 @@ mach_vm_wire(
                return KERN_INVALID_ARGUMENT;
 
        if (access != VM_PROT_NONE) {
-               rc = vm_map_wire(map, vm_map_trunc_page(start),
-                                vm_map_round_page(start+size), access, TRUE);
+               rc = vm_map_wire(map,
+                                vm_map_trunc_page(start,
+                                                  VM_MAP_PAGE_MASK(map)),
+                                vm_map_round_page(start+size,
+                                                  VM_MAP_PAGE_MASK(map)),
+                                access,
+                                TRUE);
        } else {
-               rc = vm_map_unwire(map, vm_map_trunc_page(start),
-                                  vm_map_round_page(start+size), TRUE);
+               rc = vm_map_unwire(map,
+                                  vm_map_trunc_page(start,
+                                                    VM_MAP_PAGE_MASK(map)),
+                                  vm_map_round_page(start+size,
+                                                    VM_MAP_PAGE_MASK(map)),
+                                  TRUE);
        }
        return rc;
 }
@@ -1140,11 +1184,20 @@ vm_wire(
        if (size == 0) {
                rc = KERN_SUCCESS;
        } else if (access != VM_PROT_NONE) {
-               rc = vm_map_wire(map, vm_map_trunc_page(start),
-                                vm_map_round_page(start+size), access, TRUE);
+               rc = vm_map_wire(map,
+                                vm_map_trunc_page(start,
+                                                  VM_MAP_PAGE_MASK(map)),
+                                vm_map_round_page(start+size,
+                                                  VM_MAP_PAGE_MASK(map)),
+                                access,
+                                TRUE);
        } else {
-               rc = vm_map_unwire(map, vm_map_trunc_page(start),
-                                  vm_map_round_page(start+size), TRUE);
+               rc = vm_map_unwire(map,
+                                  vm_map_trunc_page(start,
+                                                    VM_MAP_PAGE_MASK(map)),
+                                  vm_map_round_page(start+size,
+                                                    VM_MAP_PAGE_MASK(map)),
+                                  TRUE);
        }
        return rc;
 }
@@ -1293,8 +1346,12 @@ mach_vm_behavior_set(
        if (size == 0)
                return KERN_SUCCESS;
 
-       return(vm_map_behavior_set(map, vm_map_trunc_page(start), 
-                                  vm_map_round_page(start+size), new_behavior));
+       return(vm_map_behavior_set(map,
+                                  vm_map_trunc_page(start,
+                                                    VM_MAP_PAGE_MASK(map)), 
+                                  vm_map_round_page(start+size,
+                                                    VM_MAP_PAGE_MASK(map)),
+                                  new_behavior));
 }
 
 /*
@@ -1323,8 +1380,12 @@ vm_behavior_set(
        if (size == 0)
                return KERN_SUCCESS;
 
-       return(vm_map_behavior_set(map, vm_map_trunc_page(start), 
-                                  vm_map_round_page(start+size), new_behavior));
+       return(vm_map_behavior_set(map,
+                                  vm_map_trunc_page(start,
+                                                    VM_MAP_PAGE_MASK(map)), 
+                                  vm_map_round_page(start+size,
+                                                    VM_MAP_PAGE_MASK(map)),
+                                  new_behavior));
 }
 
 /*
@@ -1601,7 +1662,7 @@ mach_vm_purgable_control(
                return KERN_INVALID_ARGUMENT;
 
        return vm_map_purgable_control(map,
-                                      vm_map_trunc_page(address),
+                                      vm_map_trunc_page(address, PAGE_MASK),
                                       control,
                                       state);
 }
@@ -1617,7 +1678,7 @@ vm_purgable_control(
                return KERN_INVALID_ARGUMENT;
 
        return vm_map_purgable_control(map,
-                                      vm_map_trunc_page(address),
+                                      vm_map_trunc_page(address, PAGE_MASK),
                                       control,
                                       state);
 }
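Unlike the entry points above, the purgeable-control paths keep the fixed PAGE_MASK: purgeability is a property of the backing VM object, which the kernel locates from a single page-aligned address. For reference, a sketch of the user API these routines serve (the APIs are standard Mach; treat the exact set of includes as an assumption):

    #include <mach/mach.h>
    #include <mach/mach_error.h>
    #include <mach/mach_vm.h>
    #include <mach/vm_purgable.h>   /* assumed home of VM_PURGABLE_* */
    #include <stdio.h>

    int main(void) {
            mach_vm_address_t addr = 0;
            int state = VM_PURGABLE_VOLATILE;
            kern_return_t kr;

            kr = mach_vm_allocate(mach_task_self(), &addr, vm_page_size,
                                  VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
            if (kr != KERN_SUCCESS)
                    return 1;
            /* Mark the region volatile: the kernel may reclaim it under pressure. */
            kr = mach_vm_purgable_control(mach_task_self(), addr,
                                          VM_PURGABLE_SET_STATE, &state);
            printf("set volatile: %s\n", mach_error_string(kr));
            return 0;
    }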
@@ -1682,9 +1743,10 @@ mach_vm_page_query(
        if (VM_MAP_NULL == map)
                return KERN_INVALID_ARGUMENT;
 
-       return vm_map_page_query_internal(map,
-                                         vm_map_trunc_page(offset),
-                                         disposition, ref_count);
+       return vm_map_page_query_internal(
+               map,
+               vm_map_trunc_page(offset, PAGE_MASK),
+               disposition, ref_count);
 }
 
 kern_return_t
@@ -1697,9 +1759,10 @@ vm_map_page_query(
        if (VM_MAP_NULL == map)
                return KERN_INVALID_ARGUMENT;
 
-       return vm_map_page_query_internal(map,
-                                         vm_map_trunc_page(offset),
-                                         disposition, ref_count);
+       return vm_map_page_query_internal(
+               map,
+               vm_map_trunc_page(offset, PAGE_MASK),
+               disposition, ref_count);
 }
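The page-query paths likewise truncate with the fixed PAGE_MASK, since the answer describes one physical page. A small user-space sketch; the disposition comes back as a bit mask of the VM_PAGE_QUERY_* flags:

    #include <mach/mach.h>
    #include <mach/mach_vm.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
            integer_t disposition = 0, ref_count = 0;
            int probe = 42;     /* stack page, resident once touched */
            kern_return_t kr;

            kr = mach_vm_page_query(mach_task_self(),
                                    (mach_vm_offset_t)(uintptr_t)&probe,
                                    &disposition, &ref_count);
            if (kr == KERN_SUCCESS)
                    printf("disposition=0x%x ref_count=%d\n", disposition, ref_count);
            return 0;
    }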
 
 kern_return_t
@@ -1828,18 +1891,28 @@ mach_make_memory_entry_64(
        vm_map_offset_t         local_offset;
        vm_object_size_t        mappable_size;
 
+       /* 
+        * Stash the offset in the page for use by vm_map_enter_mem_object()
+        * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case.
+        */
+       vm_object_offset_t      offset_in_page;
+
        unsigned int            access;
        vm_prot_t               protections;
        vm_prot_t               original_protections, mask_protections;
        unsigned int            wimg_mode;
 
        boolean_t               force_shadow = FALSE;
+       boolean_t               use_data_addr;
 
        if (((permission & 0x00FF0000) &
             ~(MAP_MEM_ONLY |
               MAP_MEM_NAMED_CREATE |
               MAP_MEM_PURGABLE | 
-              MAP_MEM_NAMED_REUSE))) {
+              MAP_MEM_NAMED_REUSE |
+              MAP_MEM_USE_DATA_ADDR |
+              MAP_MEM_VM_COPY |
+              MAP_MEM_VM_SHARE))) {
                /*
                 * Unknown flag: reject for forward compatibility.
                 */
@@ -1853,25 +1926,32 @@ mach_make_memory_entry_64(
                parent_entry = NULL;
        }
 
+       if (parent_entry && parent_entry->is_copy) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
        original_protections = permission & VM_PROT_ALL;
        protections = original_protections;
        mask_protections = permission & VM_PROT_IS_MASK;
        access = GET_MAP_MEM(permission);
+       use_data_addr = ((permission & MAP_MEM_USE_DATA_ADDR) != 0);
 
        user_handle = IP_NULL;
        user_entry = NULL;
 
-       map_offset = vm_map_trunc_page(offset);
-       map_size = vm_map_round_page(*size);
+       map_offset = vm_map_trunc_page(offset, PAGE_MASK);
 
        if (permission & MAP_MEM_ONLY) {
                boolean_t               parent_is_object;
 
-               if (parent_entry == NULL) {
+               map_size = vm_map_round_page(*size, PAGE_MASK);
+               
+               if (use_data_addr || parent_entry == NULL) {
                        return KERN_INVALID_ARGUMENT;
                }
 
-               parent_is_object = !(parent_entry->is_sub_map || parent_entry->is_pager);
+               parent_is_object = !(parent_entry->is_sub_map ||
+                                    parent_entry->is_pager);
                object = parent_entry->backing.object;
                if(parent_is_object && object != VM_OBJECT_NULL)
                        wimg_mode = object->wimg_bits;
@@ -1887,6 +1967,9 @@ mach_make_memory_entry_64(
                } else if (access == MAP_MEM_COPYBACK) {
                   SET_MAP_MEM(access, parent_entry->protection);
                   wimg_mode = VM_WIMG_USE_DEFAULT;
+               } else if (access == MAP_MEM_INNERWBACK) {
+                  SET_MAP_MEM(access, parent_entry->protection);
+                  wimg_mode = VM_WIMG_INNERWBACK;
                } else if (access == MAP_MEM_WTHRU) {
                   SET_MAP_MEM(access, parent_entry->protection);
                   wimg_mode = VM_WIMG_WTHRU;
@@ -1907,9 +1990,13 @@ mach_make_memory_entry_64(
                if (object_handle)
                        *object_handle = IP_NULL;
                return KERN_SUCCESS;
-       }
+       } else if (permission & MAP_MEM_NAMED_CREATE) {
+               map_size = vm_map_round_page(*size, PAGE_MASK);
+
+               if (use_data_addr) {
+                       return KERN_INVALID_ARGUMENT;
+               }
 
-       if(permission & MAP_MEM_NAMED_CREATE) {
                kr = mach_memory_entry_allocate(&user_entry, &user_handle);
                if (kr != KERN_SUCCESS) {
                        return KERN_FAILURE;
@@ -1939,6 +2026,13 @@ mach_make_memory_entry_64(
                                goto make_mem_done;
                        }
                        object->purgable = VM_PURGABLE_NONVOLATILE;
+                       assert(object->vo_purgeable_owner == NULL);
+                       assert(object->resident_page_count == 0);
+                       assert(object->wired_page_count == 0);
+                       vm_object_lock(object);
+                       vm_purgeable_nonvolatile_enqueue(object,
+                                                        current_task());
+                       vm_object_unlock(object);
                }
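New here: a freshly created purgeable object is enqueued on the nonvolatile purgeable queue and attributed to current_task(), so the memory appears in the creating task's ledger immediately rather than only when its state changes. The user-visible way onto this path is MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE; a hypothetical helper:

    #include <mach/mach.h>
    #include <mach/memory_object_types.h>   /* MAP_MEM_NAMED_CREATE, MAP_MEM_PURGABLE */

    /* Hypothetical helper: create a one-page purgeable named entry. The
     * kernel path above then runs vm_purgeable_nonvolatile_enqueue(object,
     * current_task()), charging the memory to the caller. */
    static kern_return_t
    make_purgeable_entry(mach_port_t *entry)
    {
            memory_object_size_t size = vm_page_size;

            return mach_make_memory_entry_64(mach_task_self(), &size, 0,
                                             MAP_MEM_NAMED_CREATE |
                                             MAP_MEM_PURGABLE |
                                             VM_PROT_READ | VM_PROT_WRITE,
                                             entry, MACH_PORT_NULL);
    }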
 
                /*
@@ -1951,6 +2045,8 @@ mach_make_memory_entry_64(
                        wimg_mode = VM_WIMG_IO;
                } else if (access == MAP_MEM_COPYBACK) {
                        wimg_mode = VM_WIMG_USE_DEFAULT;
+               } else if (access == MAP_MEM_INNERWBACK) {
+                       wimg_mode = VM_WIMG_INNERWBACK;
                } else if (access == MAP_MEM_WTHRU) {
                        wimg_mode = VM_WIMG_WTHRU;
                } else if (access == MAP_MEM_WCOMB) {
@@ -1971,12 +2067,14 @@ mach_make_memory_entry_64(
                 * shadow objects either...
                 */
                object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+               object->true_share = TRUE;
 
                user_entry->backing.object = object;
                user_entry->internal = TRUE;
                user_entry->is_sub_map = FALSE;
                user_entry->is_pager = FALSE;
                user_entry->offset = 0;
+               user_entry->data_offset = 0;
                user_entry->protection = protections;
                SET_MAP_MEM(access, user_entry->protection);
                user_entry->size = map_size;
@@ -1989,9 +2087,135 @@ mach_make_memory_entry_64(
                return KERN_SUCCESS;
        }
 
+       if (permission & MAP_MEM_VM_COPY) {
+               vm_map_copy_t   copy;
+
+               if (target_map == VM_MAP_NULL) {
+                       return KERN_INVALID_TASK;
+               }
+
+               if (use_data_addr) {
+                       map_size = (vm_map_round_page(offset + *size,
+                                                     PAGE_MASK) -
+                                   map_offset);
+                       offset_in_page = offset - map_offset;
+               } else {
+                       map_size = vm_map_round_page(*size, PAGE_MASK);
+                       offset_in_page = 0;
+               }
+
+               kr = vm_map_copyin(target_map,
+                                  map_offset,
+                                  map_size,
+                                  FALSE,
+                                  &copy);
+               if (kr != KERN_SUCCESS) {
+                       return kr;
+               }
+
+               kr = mach_memory_entry_allocate(&user_entry, &user_handle);
+               if (kr != KERN_SUCCESS) {
+                       vm_map_copy_discard(copy);
+                       return KERN_FAILURE;
+               }
+
+               user_entry->backing.copy = copy;
+               user_entry->internal = FALSE;
+               user_entry->is_sub_map = FALSE;
+               user_entry->is_pager = FALSE;
+               user_entry->is_copy = TRUE;
+               user_entry->offset = 0;
+               user_entry->protection = protections;
+               user_entry->size = map_size;
+               user_entry->data_offset = offset_in_page;
+
+               *size = CAST_DOWN(vm_size_t, map_size);
+               *object_handle = user_handle;
+               return KERN_SUCCESS;
+       }
+
+       if (permission & MAP_MEM_VM_SHARE) {
+               vm_map_copy_t   copy;
+               vm_prot_t       cur_prot, max_prot;
+
+               if (target_map == VM_MAP_NULL) {
+                       return KERN_INVALID_TASK;
+               }
+
+               if (use_data_addr) {
+                       map_size = (vm_map_round_page(offset + *size,
+                                                     PAGE_MASK) -
+                                   map_offset);
+                       offset_in_page = offset - map_offset;
+               } else {
+                       map_size = vm_map_round_page(*size, PAGE_MASK);
+                       offset_in_page = 0;
+               }
+
+               kr = vm_map_copy_extract(target_map,
+                                        map_offset,
+                                        map_size,
+                                        &copy,
+                                        &cur_prot,
+                                        &max_prot);
+               if (kr != KERN_SUCCESS) {
+                       return kr;
+               }
+
+               if (mask_protections) {
+                       /*
+                        * We just want as much of "original_protections" 
+                        * as we can get out of the actual "cur_prot".
+                        */
+                       protections &= cur_prot;
+                       if (protections == VM_PROT_NONE) {
+                               /* no access at all: fail */
+                               vm_map_copy_discard(copy);
+                               return KERN_PROTECTION_FAILURE;
+                       }
+               } else {
+                       /*
+                        * We want exactly "original_protections"
+                        * out of "cur_prot".
+                        */
+                       if ((cur_prot & protections) != protections) {
+                               vm_map_copy_discard(copy);
+                               return KERN_PROTECTION_FAILURE;
+                       }
+               }
+
+               kr = mach_memory_entry_allocate(&user_entry, &user_handle);
+               if (kr != KERN_SUCCESS) {
+                       vm_map_copy_discard(copy);
+                       return KERN_FAILURE;
+               }
+
+               user_entry->backing.copy = copy;
+               user_entry->internal = FALSE;
+               user_entry->is_sub_map = FALSE;
+               user_entry->is_pager = FALSE;
+               user_entry->is_copy = TRUE;
+               user_entry->offset = 0;
+               user_entry->protection = protections;
+               user_entry->size = map_size;
+               user_entry->data_offset = offset_in_page;
+
+               *size = CAST_DOWN(vm_size_t, map_size);
+               *object_handle = user_handle;
+               return KERN_SUCCESS;
+       }
+
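Two new named-entry flavors, both backed by a vm_map_copy_t rather than a VM object: MAP_MEM_VM_COPY snapshots the source range with vm_map_copyin() (copy semantics), while MAP_MEM_VM_SHARE extracts a shared copy with vm_map_copy_extract() and then checks the requested protections against what the source mapping actually permits. Both honor MAP_MEM_USE_DATA_ADDR by widening the range to whole pages and recording the sub-page start in data_offset. A worked, standalone example of that bookkeeping, assuming 4K pages:

    #include <assert.h>
    #include <stdint.h>

    #define PG_MASK 0xFFFull                      /* assume 4K pages */
    #define TRUNC(x) ((x) & ~PG_MASK)
    #define ROUND(x) (((x) + PG_MASK) & ~PG_MASK)

    int main(void) {
            uint64_t offset = 0x5123, size = 0x100;          /* caller's request */
            uint64_t map_offset     = TRUNC(offset);                 /* 0x5000 */
            uint64_t offset_in_page = offset - map_offset;           /* 0x0123 */
            uint64_t map_size       = ROUND(offset + size) - map_offset; /* 0x1000 */

            assert(map_offset == 0x5000);
            assert(offset_in_page == 0x123);
            assert(map_size == 0x1000);
            return 0;
    }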
        if (parent_entry == NULL ||
            (permission & MAP_MEM_NAMED_REUSE)) {
 
+               if (use_data_addr) {
+                       map_size = vm_map_round_page(offset + *size, PAGE_MASK) - map_offset;
+                       offset_in_page = offset - map_offset;
+               } else {
+                       map_size = vm_map_round_page(*size, PAGE_MASK);
+                       offset_in_page = 0;
+               }
+
                /* Create a named object based on address range within the task map */
                /* Go find the object at given address */
 
@@ -2156,6 +2380,10 @@ redo_lookup:
                                                 */
                                                protections &= next_entry->max_protection;
                                        }
+                                       if ((next_entry->wired_count) &&
+                                           (map_entry->wired_count == 0)) {
+                                               break;
+                                       }
                                        if(((next_entry->max_protection) 
                                                & protections) != protections) {
                                                break;
@@ -2175,7 +2403,6 @@ redo_lookup:
                        }
                }
 
-#if !CONFIG_EMBEDDED
                if (vm_map_entry_should_cow_for_true_share(map_entry) &&
                    object->vo_size > map_size &&
                    map_size != 0) {
@@ -2192,19 +2419,26 @@ redo_lookup:
                                goto redo_lookup;
                        }
 
-                       vm_map_clip_start(target_map, map_entry, vm_map_trunc_page(offset));
-                       vm_map_clip_end(target_map, map_entry, vm_map_round_page(offset) + map_size);
+                       vm_map_clip_start(target_map,
+                                         map_entry,
+                                         vm_map_trunc_page(offset,
+                                                           VM_MAP_PAGE_MASK(target_map)));
+                       vm_map_clip_end(target_map,
+                                       map_entry,
+                                       (vm_map_round_page(offset + map_size,
+                                                          VM_MAP_PAGE_MASK(target_map))));
                        force_shadow = TRUE;
 
-                       map_size = map_entry->vme_end - map_entry->vme_start;
-                       total_size = map_size;
+                       if ((map_entry->vme_end - offset) < map_size) {
+                               map_size = map_entry->vme_end - offset;
+                       }
+                       total_size = map_entry->vme_end - map_entry->vme_start;
 
                        vm_map_lock_write_to_read(target_map);
                        vm_object_lock(object);
                }
-#endif /* !CONFIG_EMBEDDED */
 
-               if(object->internal) {
+               if (object->internal) {
                        /* vm_map_lookup_locked will create a shadow if   */
                        /* needs_copy is set but does not check for the   */
                        /* other two conditions shown. It is important to */ 
@@ -2214,8 +2448,12 @@ redo_lookup:
                        if (force_shadow ||
                            ((map_entry->needs_copy  ||
                              object->shadowed ||
-                             (object->vo_size > total_size)) &&
-                            !object->true_share)) {
+                             (object->vo_size > total_size &&
+                              (map_entry->offset != 0 ||
+                               object->vo_size >
+                               vm_map_round_page(total_size,
+                                                 VM_MAP_PAGE_MASK(target_map)))))
+                            && !object->true_share)) {
                                /*
                                 * We have to unlock the VM object before
                                 * trying to upgrade the VM map lock, to
@@ -2240,7 +2478,9 @@ redo_lookup:
                                        target_map = original_map;
                                        goto redo_lookup;
                                }
+#if 00
                                vm_object_lock(object);
+#endif
 
                                /* 
                                 * JMM - We need to avoid coming here when the object
@@ -2253,7 +2493,9 @@ redo_lookup:
                                vm_object_shadow(&map_entry->object.vm_object,
                                                 &map_entry->offset, total_size);
                                shadow_object = map_entry->object.vm_object;
+#if 00
                                vm_object_unlock(object);
+#endif
 
                                prot = map_entry->protection & ~VM_PROT_WRITE;
 
@@ -2264,7 +2506,7 @@ redo_lookup:
                                        object, map_entry->offset,
                                        total_size,
                                        ((map_entry->is_shared 
-                                               || target_map->mapped)
+                                         || target_map->mapped_in_other_pmaps)
                                                        ? PMAP_NULL :
                                                        target_map->pmap),
                                        map_entry->vme_start,
@@ -2276,6 +2518,9 @@ redo_lookup:
 
                                vm_object_lock(shadow_object);
                                while (total_size) {
+                                   assert((next_entry->wired_count == 0) ||
+                                          (map_entry->wired_count));
+
                                   if(next_entry->object.vm_object == object) {
                                        vm_object_reference_locked(shadow_object);
                                        next_entry->object.vm_object 
@@ -2327,6 +2572,8 @@ redo_lookup:
                                wimg_mode = VM_WIMG_IO;
                        } else if (access == MAP_MEM_COPYBACK) {
                                wimg_mode = VM_WIMG_USE_DEFAULT;
+                       } else if (access == MAP_MEM_INNERWBACK) {
+                               wimg_mode = VM_WIMG_INNERWBACK;
                        } else if (access == MAP_MEM_WTHRU) {
                                wimg_mode = VM_WIMG_WTHRU;
                        } else if (access == MAP_MEM_WCOMB) {
@@ -2334,6 +2581,22 @@ redo_lookup:
                        }
                }
 
+#if VM_OBJECT_TRACKING_OP_TRUESHARE
+               if (!object->true_share &&
+                   vm_object_tracking_inited) {
+                       void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+                       int num = 0;
+
+                       num = OSBacktrace(bt,
+                                         VM_OBJECT_TRACKING_BTDEPTH);
+                       btlog_add_entry(vm_object_tracking_btlog,
+                                       object,
+                                       VM_OBJECT_TRACKING_OP_TRUESHARE,
+                                       bt,
+                                       num);
+               }
+#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
+
                object->true_share = TRUE;
                if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
                        object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
@@ -2374,7 +2637,9 @@ redo_lookup:
                            parent_entry->is_pager == FALSE &&
                            parent_entry->offset == obj_off &&
                            parent_entry->protection == protections &&
-                           parent_entry->size == map_size) {
+                           parent_entry->size == map_size &&
+                           ((!use_data_addr && (parent_entry->data_offset == 0)) ||  
+                            (use_data_addr && (parent_entry->data_offset == offset_in_page)))) {
                                /*
                                 * We have a match: re-use "parent_entry".
                                 */
@@ -2384,6 +2649,8 @@ redo_lookup:
                                /* parent_entry->ref_count++; XXX ? */
                                /* Get an extra send-right on handle */
                                ipc_port_copy_send(parent_handle);
+
+                               *size = CAST_DOWN(vm_size_t, map_size);
                                *object_handle = parent_handle;
                                return KERN_SUCCESS;
                        } else {
@@ -2407,6 +2674,7 @@ redo_lookup:
                user_entry->is_sub_map = FALSE;
                user_entry->is_pager = FALSE;
                user_entry->offset = obj_off;
+               user_entry->data_offset = offset_in_page;
                user_entry->protection = protections;
                SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
                user_entry->size = map_size;
@@ -2420,14 +2688,39 @@ redo_lookup:
 
        } else {
        /* The new object will be based on an existing named object */
-
                if (parent_entry == NULL) {
                        kr = KERN_INVALID_ARGUMENT;
                        goto make_mem_done;
                }
-               if((offset + map_size) > parent_entry->size) {
-                       kr = KERN_INVALID_ARGUMENT;
-                       goto make_mem_done;
+
+               if (use_data_addr) {
+                       /*
+                        * Submaps and pagers should only be accessible from
+                        * within the kernel, which should never use the data
+                        * address flag, so panic here if it does.
+                        */
+                       if (parent_entry->is_pager || parent_entry->is_sub_map) {
+                               panic("Shouldn't be using data address with a parent entry that is a submap or pager.");
+                       }
+                       /*
+                        * Account for offset to data in parent entry and
+                        * compute our own offset to data.
+                        */
+                       if((offset + *size + parent_entry->data_offset) > parent_entry->size) {
+                               kr = KERN_INVALID_ARGUMENT;
+                               goto make_mem_done;
+                       }
+
+                       map_offset = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK);
+                       offset_in_page = (offset + parent_entry->data_offset) - map_offset;
+                       map_size = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK) - map_offset;
+               } else {
+                       map_size = vm_map_round_page(*size, PAGE_MASK);
+                       offset_in_page = 0;
+
+                       if((offset + map_size) > parent_entry->size) {
+                               kr = KERN_INVALID_ARGUMENT;
+                               goto make_mem_done;
+                       }
                }
 
                if (mask_protections) {
@@ -2451,8 +2744,10 @@ redo_lookup:
 
                user_entry->size = map_size;
                user_entry->offset = parent_entry->offset + map_offset;
+               user_entry->data_offset = offset_in_page; 
                user_entry->is_sub_map = parent_entry->is_sub_map;
                user_entry->is_pager = parent_entry->is_pager;
+               user_entry->is_copy = parent_entry->is_copy;
                user_entry->internal = parent_entry->internal;
                user_entry->protection = protections;
 
@@ -2476,6 +2771,22 @@ redo_lookup:
                   /* we now point to this object, hold on */
                   vm_object_reference(object); 
                   vm_object_lock(object);
+#if VM_OBJECT_TRACKING_OP_TRUESHARE
+               if (!object->true_share &&
+                   vm_object_tracking_inited) {
+                       void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+                       int num = 0;
+
+                       num = OSBacktrace(bt,
+                                         VM_OBJECT_TRACKING_BTDEPTH);
+                       btlog_add_entry(vm_object_tracking_btlog,
+                                       object,
+                                       VM_OBJECT_TRACKING_OP_TRUESHARE,
+                                       bt,
+                                       num);
+               }
+#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
+
                   object->true_share = TRUE;
                   if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
                        object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
@@ -2603,9 +2914,11 @@ mach_memory_entry_allocate(
        user_entry->backing.pager = NULL;
        user_entry->is_sub_map = FALSE;
        user_entry->is_pager = FALSE;
+       user_entry->is_copy = FALSE;
        user_entry->internal = FALSE;
        user_entry->size = 0;
        user_entry->offset = 0;
+       user_entry->data_offset = 0;
        user_entry->protection = VM_PROT_NONE;
        user_entry->ref_count = 1;
 
@@ -2703,7 +3016,9 @@ mach_memory_entry_purgable_control(
 
        named_entry_lock(mem_entry);
 
-       if (mem_entry->is_sub_map || mem_entry->is_pager) {
+       if (mem_entry->is_sub_map ||
+           mem_entry->is_pager ||
+           mem_entry->is_copy) {
                named_entry_unlock(mem_entry);
                return KERN_INVALID_ARGUMENT;
        }
@@ -2732,6 +3047,54 @@ mach_memory_entry_purgable_control(
        return kr;
 }
 
+kern_return_t
+mach_memory_entry_get_page_counts(
+       ipc_port_t      entry_port,
+       unsigned int    *resident_page_count,
+       unsigned int    *dirty_page_count)
+{
+       kern_return_t           kr;
+       vm_named_entry_t        mem_entry;
+       vm_object_t             object;
+       vm_object_offset_t      offset;
+       vm_object_size_t        size;
+
+       if (entry_port == IP_NULL ||
+           ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
+
+       named_entry_lock(mem_entry);
+
+       if (mem_entry->is_sub_map ||
+           mem_entry->is_pager ||
+           mem_entry->is_copy) {
+               named_entry_unlock(mem_entry);
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       object = mem_entry->backing.object;
+       if (object == VM_OBJECT_NULL) {
+               named_entry_unlock(mem_entry);
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       vm_object_lock(object);
+
+       offset = mem_entry->offset;
+       size = mem_entry->size;
+
+       named_entry_unlock(mem_entry);
+
+       kr = vm_object_get_page_counts(object, offset, size, resident_page_count, dirty_page_count);
+
+       vm_object_unlock(object);
+
+       return kr;
+}
+
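mach_memory_entry_get_page_counts() is new: it reports how many pages of a named entry's backing object are resident, and how many of those are dirty, without mapping the entry anywhere. A hypothetical user-space helper (whether a given SDK exports the MIG stub, and from which header, is an assumption):

    #include <mach/mach.h>
    #include <mach/memory_entry.h>  /* assumed location of the MIG stub */
    #include <stdio.h>

    /* Hypothetical helper: print residency statistics for a named entry. */
    static void
    print_entry_pages(mach_port_t entry)
    {
            unsigned int resident = 0, dirty = 0;

            if (mach_memory_entry_get_page_counts(entry, &resident, &dirty)
                == KERN_SUCCESS)
                    printf("resident=%u dirty=%u\n", resident, dirty);
    }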
 /*
  * mach_memory_entry_port_release:
  *
@@ -2768,22 +3131,29 @@ mach_destroy_memory_entry(
        assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
 #endif /* MACH_ASSERT */
        named_entry = (vm_named_entry_t)port->ip_kobject;
-       lck_mtx_lock(&(named_entry)->Lock);
+
+       named_entry_lock(named_entry);
        named_entry->ref_count -= 1;
+
        if(named_entry->ref_count == 0) {
                if (named_entry->is_sub_map) {
                        vm_map_deallocate(named_entry->backing.map);
-               } else if (!named_entry->is_pager) { 
-                       /* release the memory object we've been pointing to */
+               } else if (named_entry->is_pager) {
+                       /* JMM - need to drop reference on pager in that case */
+               } else if (named_entry->is_copy) {
+                       vm_map_copy_discard(named_entry->backing.copy);
+               } else {
+                       /* release the VM object we've been pointing to */
                        vm_object_deallocate(named_entry->backing.object);
-               } /* else JMM - need to drop reference on pager in that case */
+               }
 
-               lck_mtx_unlock(&(named_entry)->Lock);
+               named_entry_unlock(named_entry);
+               named_entry_lock_destroy(named_entry);
 
                kfree((void *) port->ip_kobject,
                      sizeof (struct vm_named_entry));
        } else
-               lck_mtx_unlock(&(named_entry)->Lock);
+               named_entry_unlock(named_entry);
 }
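mach_destroy_memory_entry() now goes through the named_entry_lock()/named_entry_unlock() wrappers, discards the vm_map_copy behind the new copy-backed entries, and destroys the lock before freeing the entry. From user space this is what runs when the last send right on the entry port is released; a minimal sketch:

    #include <mach/mach.h>

    /* Sketch: dropping the final send right on a named-entry port leads the
     * kernel into mach_destroy_memory_entry() once its ref count hits zero. */
    static kern_return_t
    release_named_entry(mach_port_t entry)
    {
            return mach_port_deallocate(mach_task_self(), entry);
    }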
 
 /* Allow manipulation of individual page state.  This is actually part of */
@@ -2810,7 +3180,9 @@ mach_memory_entry_page_op(
 
        named_entry_lock(mem_entry);
 
-       if (mem_entry->is_sub_map || mem_entry->is_pager) {
+       if (mem_entry->is_sub_map ||
+           mem_entry->is_pager ||
+           mem_entry->is_copy) {
                named_entry_unlock(mem_entry);
                return KERN_INVALID_ARGUMENT;
        }
@@ -2862,7 +3234,9 @@ mach_memory_entry_range_op(
 
        named_entry_lock(mem_entry);
 
-       if (mem_entry->is_sub_map || mem_entry->is_pager) {
+       if (mem_entry->is_sub_map ||
+           mem_entry->is_pager ||
+           mem_entry->is_copy) {
                named_entry_unlock(mem_entry);
                return KERN_INVALID_ARGUMENT;
        }
@@ -3068,7 +3442,10 @@ vm_region_object_create(
        /* Create a named object based on a submap of specified size */
 
        new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
-                               vm_map_round_page(size), TRUE);
+                               vm_map_round_page(size,
+                                                 VM_MAP_PAGE_MASK(target_map)),
+                               TRUE);
+       vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));
 
        user_entry->backing.map = new_map;
        user_entry->internal = TRUE;
@@ -3098,7 +3475,7 @@ vm_map_get_phys_page(
        vm_map_entry_t          entry;
        ppnum_t                 phys_page = 0;
 
-       map_offset = vm_map_trunc_page(addr);
+       map_offset = vm_map_trunc_page(addr, PAGE_MASK);
 
        vm_map_lock(map);
        while (vm_map_lookup_entry(map, map_offset, &entry)) {
@@ -3132,7 +3509,7 @@ vm_map_get_phys_page(
                        offset = entry->offset + (map_offset - entry->vme_start);
                        phys_page = (ppnum_t)
                                ((entry->object.vm_object->vo_shadow_offset 
-                                                       + offset) >> 12);
+                                                       + offset) >> PAGE_SHIFT);
                        break;
                        
                }
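The old ">> 12" hard-coded 4K pages when converting a physical address to a page number; PAGE_SHIFT keeps this correct on larger-page configurations (e.g. 16K pages, PAGE_SHIFT == 14). A tiny standalone check of the difference, assuming a 16K page size:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
            const unsigned page_shift = 14;           /* assume a 16K-page config */
            uint64_t phys = 0x2345ull << page_shift;  /* page 0x2345, offset 0 */

            assert((phys >> page_shift) == 0x2345);   /* PAGE_SHIFT: correct */
            assert((phys >> 12) != 0x2345);           /* old ">> 12": wrong here */
            return 0;
    }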
@@ -3230,8 +3607,9 @@ kernel_object_iopl_request(
        /* offset from beginning of named entry offset in object */
        offset = offset + named_entry->offset;
 
-       if(named_entry->is_sub_map) 
-               return (KERN_INVALID_ARGUMENT);
+       if (named_entry->is_sub_map ||
+           named_entry->is_copy)
+               return KERN_INVALID_ARGUMENT;
                
        named_entry_lock(named_entry);
 
@@ -3279,8 +3657,8 @@ kernel_object_iopl_request(
        }
 
        if (!object->private) {
-               if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
-                       *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
+               if (*upl_size > MAX_UPL_TRANSFER_BYTES)
+                       *upl_size = MAX_UPL_TRANSFER_BYTES;
                if (object->phys_contiguous) {
                        *flags = UPL_PHYS_CONTIG;
                } else {