apple/xnu.git (tag xnu-3248.50.21) - osfmk/vm/vm_map.c

diff --git a/osfmk/vm/vm_map.c b/osfmk/vm/vm_map.c
index f0a560f067d8a0324229a1534f7a95a57e791082..840c4babfa0e6d06844d43a9abe26bc1dab81708 100644
--- a/osfmk/vm/vm_map.c
+++ b/osfmk/vm/vm_map.c
@@ -2514,12 +2514,20 @@ StartAgain: ;
                                 */
                                new_entry->use_pmap = FALSE;
                        } else if (!is_submap &&
-                                  iokit_acct) {
+                                  iokit_acct &&
+                                  object != VM_OBJECT_NULL &&
+                                  object->internal) {
                                /* alternate accounting */
                                assert(!new_entry->iokit_acct);
                                assert(new_entry->use_pmap);
                                new_entry->iokit_acct = TRUE;
                                new_entry->use_pmap = FALSE;
+                               DTRACE_VM4(
+                                       vm_map_iokit_mapped_region,
+                                       vm_map_t, map,
+                                       vm_map_offset_t, new_entry->vme_start,
+                                       vm_map_offset_t, new_entry->vme_end,
+                                       int, VME_ALIAS(new_entry));
                                vm_map_iokit_mapped_region(
                                        map,
                                        (new_entry->vme_end -
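
This hunk narrows the alternate-accounting path: beyond iokit_acct being set on a non-submap entry, the mapping must now be backed by an existing internal (anonymous) object before it is charged to IOKit, and a new vm_map_iokit_mapped_region DTrace probe records each such mapping (a matching vm_map_iokit_unmapped_region probe appears in the vm_map_delete hunk below). A restatement of the tightened predicate as a hypothetical helper; the function name is illustrative, not part of xnu:

/*
 * Hypothetical restatement of the predicate introduced above.
 * An entry is charged to IOKit's alternate accounting only when it
 * is not a submap, was explicitly tagged for IOKit accounting, and
 * is backed by an internal (anonymous) VM object.
 */
static inline boolean_t
uses_iokit_alternate_accounting(boolean_t is_submap,
                                boolean_t iokit_acct,
                                vm_object_t object)
{
	return (!is_submap &&
	        iokit_acct &&
	        object != VM_OBJECT_NULL &&
	        object->internal);
}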
@@ -4376,6 +4384,7 @@ vm_map_submap(
        return(result);
 }
 
+
 /*
  *     vm_map_protect:
  *
@@ -4549,6 +4558,7 @@ vm_map_protect(
                        if (override_nx(map, VME_ALIAS(current)) && prot)
                                prot |= VM_PROT_EXECUTE;
 
+
                        if (current->is_sub_map && current->use_pmap) {
                                pmap_protect(VME_SUBMAP(current)->pmap, 
                                             current->vme_start,
@@ -4999,6 +5009,7 @@ vm_map_wire_nested(
                                           &real_map)) {
 
                                        vm_map_unlock_read(lookup_map);
+                                       assert(map_pmap == NULL);
                                        vm_map_unwire(map, start,
                                                      s, user_wire);
                                        return(KERN_FAILURE);
@@ -5347,7 +5358,8 @@ done:
 
        if (rc != KERN_SUCCESS) {
                /* undo what has been wired so far */
-               vm_map_unwire(map, start, s, user_wire);
+               vm_map_unwire_nested(map, start, s, user_wire,
+                                    map_pmap, pmap_addr);
                if (physpage_p) {
                        *physpage_p = 0;
                }
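
The two wiring hunks above fix an unwind path: on failure partway through vm_map_wire_nested(), the old code undid its work with a plain vm_map_unwire() on the top-level map, which is wrong when the pages were wired through a nested pmap; the fix unwinds via vm_map_unwire_nested() with the same map_pmap and pmap_addr, and the earlier assert(map_pmap == NULL) documents the one lookup-failure path where the plain call remains valid. The bug class in miniature, with all names hypothetical:

/*
 * Cleanup after a partial failure must mirror the parameters of the
 * operation it unwinds: wiring through one pmap and unwiring through
 * another leaves wire counts unbalanced.
 */
struct toy_pmap { int wire_count[8]; };

static void toy_wire(struct toy_pmap *pm, int page)   { pm->wire_count[page]++; }
static void toy_unwire(struct toy_pmap *pm, int page) { pm->wire_count[page]--; }

/* Wires pages [0, n); on failure, unwinds in the SAME pmap it wired. */
static int
toy_wire_range(struct toy_pmap *pm, int n, int fail_at)
{
	for (int page = 0; page < n; page++) {
		if (page == fail_at) {
			while (page-- > 0)
				toy_unwire(pm, page);   /* not some other pmap */
			return -1;
		}
		toy_wire(pm, page);
	}
	return 0;
}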
@@ -6458,6 +6470,11 @@ vm_map_delete(
 
                if (entry->iokit_acct) {
                        /* alternate accounting */
+                       DTRACE_VM4(vm_map_iokit_unmapped_region,
+                                  vm_map_t, map,
+                                  vm_map_offset_t, entry->vme_start,
+                                  vm_map_offset_t, entry->vme_end,
+                                  int, VME_ALIAS(entry));
                        vm_map_iokit_unmapped_region(map,
                                                     (entry->vme_end -
                                                      entry->vme_start));
@@ -8645,6 +8662,39 @@ vm_map_copy_remap(
        }
 }
 
+
+boolean_t
+vm_map_copy_validate_size(
+       vm_map_t                dst_map,
+       vm_map_copy_t           copy,
+       vm_map_size_t           size)
+{
+       if (copy == VM_MAP_COPY_NULL)
+               return FALSE;
+       switch (copy->type) {
+       case VM_MAP_COPY_OBJECT:
+       case VM_MAP_COPY_KERNEL_BUFFER:
+               if (size == copy->size)
+                       return TRUE;
+               break;
+       case VM_MAP_COPY_ENTRY_LIST:
+               /*
+                * potential page-size rounding prevents us from exactly
+                * validating this flavor of vm_map_copy, but we can at least
+                * assert that it's within a range.
+                */
+               if (copy->size >= size &&
+                   copy->size <= vm_map_round_page(size,
+                                                   VM_MAP_PAGE_MASK(dst_map)))
+                       return TRUE;
+               break;
+       default:
+               break;
+       }
+       return FALSE;
+}
+
+
 /*
  *     Routine:        vm_map_copyout
  *
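
vm_map_copy_validate_size() lets a recipient of a vm_map_copy_t check it against an expected size before mapping it: OBJECT and KERNEL_BUFFER copies must match exactly, while ENTRY_LIST copies may have been rounded up to the destination map's page size. A hedged usage sketch; the wrapper function is hypothetical:

/*
 * Hypothetical caller: map a received copy object only if its size
 * matches what the sender claimed (exactly, or up to page rounding
 * for entry-list copies).
 */
static kern_return_t
map_received_copy(vm_map_t dst_map, vm_map_copy_t copy,
                  vm_map_size_t claimed_size, vm_map_address_t *addr)
{
	if (!vm_map_copy_validate_size(dst_map, copy, claimed_size)) {
		vm_map_copy_discard(copy);
		return KERN_INVALID_ARGUMENT;
	}
	return vm_map_copyout(dst_map, addr, copy);
}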
@@ -9119,13 +9169,35 @@ vm_map_copyin_common(
        __unused boolean_t      src_volatile,
        vm_map_copy_t   *copy_result,   /* OUT */
        boolean_t       use_maxprot)
+{
+       int flags;
+
+       flags = 0;
+       if (src_destroy) {
+               flags |= VM_MAP_COPYIN_SRC_DESTROY;
+       }
+       if (use_maxprot) {
+               flags |= VM_MAP_COPYIN_USE_MAXPROT;
+       }
+       return vm_map_copyin_internal(src_map,
+                                     src_addr,
+                                     len,
+                                     flags,
+                                     copy_result);
+}
+kern_return_t
+vm_map_copyin_internal(
+       vm_map_t        src_map,
+       vm_map_address_t src_addr,
+       vm_map_size_t   len,
+       int             flags,
+       vm_map_copy_t   *copy_result)   /* OUT */
 {
        vm_map_entry_t  tmp_entry;      /* Result of last map lookup --
                                         * in multi-level lookup, this
                                         * entry contains the actual
                                         * vm_object/offset.
                                         */
-       register
        vm_map_entry_t  new_entry = VM_MAP_ENTRY_NULL;  /* Map entry for copy */
 
        vm_map_offset_t src_start;      /* Start of current entry --
@@ -9138,10 +9210,18 @@ vm_map_copyin_common(
        boolean_t       map_share=FALSE;
        submap_map_t    *parent_maps = NULL;
 
-       register
        vm_map_copy_t   copy;           /* Resulting copy */
        vm_map_address_t copy_addr;
        vm_map_size_t   copy_size;
+       boolean_t       src_destroy;
+       boolean_t       use_maxprot;
+
+       if (flags & ~VM_MAP_COPYIN_ALL_FLAGS) {
+               return KERN_INVALID_ARGUMENT;
+       }
+               
+       src_destroy = (flags & VM_MAP_COPYIN_SRC_DESTROY) ? TRUE : FALSE;
+       use_maxprot = (flags & VM_MAP_COPYIN_USE_MAXPROT) ? TRUE : FALSE;
 
        /*
         *      Check for copies of zero bytes.
@@ -9165,7 +9245,9 @@ vm_map_copyin_common(
         * setting up VM (and taking C-O-W faults) dominates the copy costs
         * for small regions.
         */
-       if ((len < msg_ool_size_small) && !use_maxprot)
+       if ((len < msg_ool_size_small) &&
+           !use_maxprot &&
+           !(flags & VM_MAP_COPYIN_ENTRY_LIST))
                return vm_map_copyin_kernel_buffer(src_map, src_addr, len,
                                                   src_destroy, copy_result);
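
vm_map_copyin_common() is now a thin wrapper that translates its src_destroy and use_maxprot booleans into VM_MAP_COPYIN_SRC_DESTROY and VM_MAP_COPYIN_USE_MAXPROT flags for the new vm_map_copyin_internal(). The extra VM_MAP_COPYIN_ENTRY_LIST flag, tested in the small-copy shortcut above, lets a caller force a real entry-list copy where a short region would otherwise come back as a VM_MAP_COPY_KERNEL_BUFFER copy. A hypothetical caller:

/*
 * Hedged sketch: force the entry-list path even for a region shorter
 * than msg_ool_size_small, without destroying the source.  The wrapper
 * name is illustrative, not part of xnu.
 */
static kern_return_t
copyin_as_entry_list(vm_map_t src_map, vm_map_address_t src_addr,
                     vm_map_size_t len, vm_map_copy_t *copyp)
{
	return vm_map_copyin_internal(src_map, src_addr, len,
	                              VM_MAP_COPYIN_ENTRY_LIST, copyp);
}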
 
@@ -12665,6 +12747,10 @@ vm_map_entry_is_reusable(
 
        vm_object_t object;
 
+       if (entry->is_sub_map) {
+               return FALSE;
+       }
+
        switch (VME_ALIAS(entry)) {
        case VM_MEMORY_MALLOC:
        case VM_MEMORY_MALLOC_SMALL:
@@ -12795,6 +12881,7 @@ vm_map_reuse_pages(
                start_offset += VME_OFFSET(entry);
                end_offset += VME_OFFSET(entry);
 
+               assert(!entry->is_sub_map);
                object = VME_OBJECT(entry);
                if (object != VM_OBJECT_NULL) {
                        vm_object_lock(object);
@@ -12885,6 +12972,7 @@ vm_map_reusable_pages(
                start_offset += VME_OFFSET(entry);
                end_offset += VME_OFFSET(entry);
 
+               assert(!entry->is_sub_map);
                object = VME_OBJECT(entry);
                if (object == VM_OBJECT_NULL)
                        continue;
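
The is_sub_map guard and the two asserts share one rationale: a map entry's object and submap pointers overlay each other, so VME_OBJECT() on a submap entry would reinterpret a vm_map_t as a vm_object_t. Rejecting submap entries in vm_map_entry_is_reusable() keeps the reuse paths from ever reading the wrong arm of that union. A simplified sketch of the layout, with names abbreviated from the real struct:

/*
 * Simplified sketch: the two pointers share storage, so is_sub_map
 * is the only thing that says which interpretation is valid.
 */
struct toy_map_entry {
	union {
		struct vm_object *vmo_object;   /* valid if !is_sub_map */
		struct vm_map    *vmo_submap;   /* valid if  is_sub_map */
	} vme_u;
	unsigned int is_sub_map : 1;
};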
@@ -15846,7 +15934,6 @@ vm_map_query_volatile(
        mach_vm_size_t  volatile_pmap_count;
        mach_vm_size_t  volatile_compressed_pmap_count;
        mach_vm_size_t  resident_count;
-       unsigned int    compressed_count;
        vm_map_entry_t  entry;
        vm_object_t     object;
 
@@ -15861,6 +15948,8 @@ vm_map_query_volatile(
        for (entry = vm_map_first_entry(map);
             entry != vm_map_to_entry(map);
             entry = entry->vme_next) {
+               mach_vm_size_t  pmap_resident_bytes, pmap_compressed_bytes;
+
                if (entry->is_sub_map) {
                        continue;
                }
@@ -15898,12 +15987,15 @@ vm_map_query_volatile(
                        volatile_compressed_count +=
                                vm_compressor_pager_get_count(object->pager);
                }
-               compressed_count = 0;
-               volatile_pmap_count += pmap_query_resident(map->pmap,
-                                                          entry->vme_start,
-                                                          entry->vme_end,
-                                                          &compressed_count);
-               volatile_compressed_pmap_count += compressed_count;
+               pmap_compressed_bytes = 0;
+               pmap_resident_bytes =
+                       pmap_query_resident(map->pmap,
+                                           entry->vme_start,
+                                           entry->vme_end,
+                                           &pmap_compressed_bytes);
+               volatile_pmap_count += (pmap_resident_bytes / PAGE_SIZE);
+               volatile_compressed_pmap_count += (pmap_compressed_bytes
+                                                  / PAGE_SIZE);
        }
 
        /* map is still locked on return */
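
The last hunk adapts vm_map_query_volatile() to a changed pmap_query_resident() contract: the pmap now reports resident and compressed totals in bytes rather than pages, so the caller divides by PAGE_SIZE before accumulating page counts. A hedged sketch of the conversion; the helper is hypothetical:

/*
 * Hypothetical helper mirroring the new call pattern above:
 * pmap_query_resident() returns bytes, so page-based accounting
 * divides by PAGE_SIZE.
 */
static void
accumulate_volatile_counts(pmap_t pmap,
                           vm_map_offset_t start, vm_map_offset_t end,
                           mach_vm_size_t *resident_pages,
                           mach_vm_size_t *compressed_pages)
{
	mach_vm_size_t resident_bytes;
	mach_vm_size_t compressed_bytes = 0;

	resident_bytes = pmap_query_resident(pmap, start, end,
	                                     &compressed_bytes);
	*resident_pages   += resident_bytes / PAGE_SIZE;
	*compressed_pages += compressed_bytes / PAGE_SIZE;
}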