diff --git a/osfmk/vm/vm_map.c b/osfmk/vm/vm_map.c
index d130132c30bd3b34ab6251047e6b1e0ce9a688b9..ab0d87614e7a3d13ae0e22ac4d2a2087a57754ed 100644
--- a/osfmk/vm/vm_map.c
+++ b/osfmk/vm/vm_map.c
@@ -867,6 +867,9 @@ int malloc_no_cow = 0;
 #define VM_PROTECT_WX_FAIL 1
 #endif /* CONFIG_EMBEDDED */
 uint64_t vm_memory_malloc_no_cow_mask = 0ULL;
+#if DEBUG
+int vm_check_map_sanity = 0;
+#endif
 
 /*
  *     vm_map_init:
@@ -1016,6 +1019,15 @@ vm_map_init(
                    &vm_memory_malloc_no_cow_mask,
                    sizeof(vm_memory_malloc_no_cow_mask));
        }
+
+#if DEBUG
+       PE_parse_boot_argn("vm_check_map_sanity", &vm_check_map_sanity, sizeof(vm_check_map_sanity));
+       if (vm_check_map_sanity) {
+               kprintf("VM sanity checking enabled\n");
+       } else {
+               kprintf("VM sanity checking disabled. Set bootarg vm_check_map_sanity=1 to enable\n");
+       }
+#endif /* DEBUG */
 }
 
 void
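
For context, a minimal sketch of how a DEBUG-only flag such as vm_check_map_sanity might be consumed elsewhere in the VM map code; the check_map_sanity() helper and the wrapper below are assumptions for illustration, not part of this change:

#if DEBUG
extern int vm_check_map_sanity;   /* set from the boot-arg parsed in vm_map_init() above */

/*
 * Illustrative only: run an expensive map-consistency check solely when the
 * vm_check_map_sanity boot-arg was supplied at boot.
 */
static inline void
vm_map_sanity_check_if_enabled(vm_map_t map)
{
	if (vm_check_map_sanity) {
		check_map_sanity(map);   /* assumed debug helper */
	}
}
#endif /* DEBUG */
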
@@ -1185,6 +1197,7 @@ vm_map_create_options(
        result->map_disallow_data_exec = FALSE;
        result->is_nested_map = FALSE;
        result->map_disallow_new_exec = FALSE;
+       result->terminated = FALSE;
        result->highest_entry_end = 0;
        result->first_free = vm_map_to_entry(result);
        result->hint = vm_map_to_entry(result);
@@ -4011,7 +4024,7 @@ vm_map_enter_mem_object_helper(
        } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
                vm_named_entry_t        named_entry;
 
-               named_entry = (vm_named_entry_t) port->ip_kobject;
+               named_entry = (vm_named_entry_t) ip_get_kobject(port);
 
                if (flags & (VM_FLAGS_RETURN_DATA_ADDR |
                    VM_FLAGS_RETURN_4K_DATA_ADDR)) {
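
The raw port->ip_kobject dereferences in this file are replaced with the ip_get_kobject() accessor. A rough sketch of the accessor pattern, under the assumption that it simply wraps the field read (the real definition lives in the IPC headers and may do more, for example pointer-authentication handling on arm64e):

/* Illustrative shape only; see osfmk/ipc/ipc_port.h for the actual definition. */
static inline ipc_kobject_t
ip_get_kobject(ipc_port_t port)
{
	return port->ip_kobject;
}
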
@@ -6135,14 +6148,13 @@ add_wire_counts(
                        /*
                         * Since this is the first time the user is wiring this map entry, check to see if we're
                         * exceeding the user wire limits.  There is a per map limit which is the smaller of either
-                        * the process's rlimit or the global vm_user_wire_limit which caps this value.  There is also
+                        * the process's rlimit or the global vm_per_task_user_wire_limit which caps this value.  There is also
                         * a system-wide limit on the amount of memory all users can wire.  If the user is over either
                         * limit, then we fail.
                         */
 
-                       if (size + map->user_wire_size > MIN(map->user_wire_limit, vm_user_wire_limit) ||
-                           size + ptoa_64(total_wire_count) > vm_global_user_wire_limit ||
-                           size + ptoa_64(total_wire_count) > max_mem - vm_global_no_user_wire_amount) {
+                       if (size + map->user_wire_size > MIN(map->user_wire_limit, vm_per_task_user_wire_limit) ||
+                           size + ptoa_64(total_wire_count) > vm_global_user_wire_limit) {
                                return KERN_RESOURCE_SHORTAGE;
                        }
 
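
The wiring check above now tests two limits instead of three: a per-task cap, MIN(map->user_wire_limit, vm_per_task_user_wire_limit), and the system-wide vm_global_user_wire_limit; the separate max_mem - vm_global_no_user_wire_amount term is gone. A standalone sketch of the same predicate, purely to make the logic easier to read in isolation (names mirror the diff; the helper itself is not part of this change):

/* Illustrative only: the new wire-limit test factored into a helper. */
static boolean_t
user_wire_would_exceed_limits(vm_map_t map, vm_map_size_t size)
{
	/* per-task: the smaller of the process rlimit and the global per-task cap */
	if (size + map->user_wire_size >
	    MIN(map->user_wire_limit, vm_per_task_user_wire_limit)) {
		return TRUE;
	}
	/* system-wide: total pages wired by all users, expressed in bytes */
	if (size + ptoa_64(total_wire_count) > vm_global_user_wire_limit) {
		return TRUE;
	}
	return FALSE;
}
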
@@ -7601,7 +7613,7 @@ vm_map_delete(
        const vm_map_offset_t   FIND_GAP = 1;   /* a not page aligned value */
        const vm_map_offset_t   GAPS_OK = 2;    /* a different not page aligned value */
 
-       if (map != kernel_map && !(flags & VM_MAP_REMOVE_GAPS_OK)) {
+       if (map != kernel_map && !(flags & VM_MAP_REMOVE_GAPS_OK) && !map->terminated) {
                gap_start = FIND_GAP;
        } else {
                gap_start = GAPS_OK;
@@ -8316,6 +8328,34 @@ vm_map_delete(
        return KERN_SUCCESS;
 }
 
+
+/*
+ *     vm_map_terminate:
+ *
+ *     Clean out a task's map.
+ */
+kern_return_t
+vm_map_terminate(
+       vm_map_t        map)
+{
+       vm_map_lock(map);
+       map->terminated = TRUE;
+       vm_map_unlock(map);
+
+       return vm_map_remove(map,
+                  map->min_offset,
+                  map->max_offset,
+                  /*
+                   * Final cleanup:
+                   * + no unnesting
+                   * + remove immutable mappings
+                   * + allow gaps in range
+                   */
+                  (VM_MAP_REMOVE_NO_UNNESTING |
+                  VM_MAP_REMOVE_IMMUTABLE |
+                  VM_MAP_REMOVE_GAPS_OK));
+}
+
 /*
  *     vm_map_remove:
  *
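
The terminated flag set here is what the vm_map_delete() change above consults to skip gap checking during final teardown. A hedged sketch of how a task-teardown path might use the new entry point; the caller below is an assumption for illustration, not code from this diff:

/*
 * Illustrative caller: late in task teardown, mark the map terminated and
 * remove every mapping, tolerating gaps and immutable entries.
 */
static void
example_task_map_teardown(task_t task)
{
	kern_return_t kr;

	kr = vm_map_terminate(task->map);
	assert(kr == KERN_SUCCESS);
}
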
@@ -11376,37 +11416,20 @@ vm_map_copyin_internal(
                 *      Attempt non-blocking copy-on-write optimizations.
                 */
 
-               if (src_destroy &&
-                   (src_object == VM_OBJECT_NULL ||
-                   (src_object->internal &&
-                   src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
-                   src_entry->vme_start <= src_addr &&
-                   src_entry->vme_end >= src_end &&
-                   !map_share))) {
-                       /*
-                        * If we are destroying the source, and the object
-                        * is internal, we can move the object reference
-                        * from the source to the copy.  The copy is
-                        * copy-on-write only if the source is.
-                        * We make another reference to the object, because
-                        * destroying the source entry will deallocate it.
-                        *
-                        * This memory transfer has to be atomic (to prevent
-                        * the VM object from being shared or copied while
-                        * it's being moved here), so we can only do this
-                        * if we won't have to unlock the VM map, i.e. the
-                        * entire range must be covered by this map entry.
-                        */
-                       vm_object_reference(src_object);
-
-                       /*
-                        * Copy is always unwired.  vm_map_copy_entry
-                        * set its wired count to zero.
-                        */
-
-                       goto CopySuccessful;
-               }
-
+               /*
+                * If we are destroying the source, and the object
+                * is internal, we could move the object reference
+                * from the source to the copy.  The copy is
+                * copy-on-write only if the source is.
+                * We make another reference to the object, because
+                * destroying the source entry will deallocate it.
+                *
+                * This memory transfer has to be atomic, (to prevent
+                * the VM object from being shared or copied while
+                * it's being moved here), so we could only do this
+                * if we won't have to unlock the VM map until the
+                * original mapping has been fully removed.
+                */
 
 RestartCopy:
                if ((src_object == VM_OBJECT_NULL ||
@@ -15957,6 +15980,13 @@ RestartCopy:
                if (!copy) {
                        if (src_entry->used_for_jit == TRUE) {
                                if (same_map) {
+#if __APRR_SUPPORTED__
+                                       /*
+                                        * Disallow re-mapping of any JIT regions on APRR devices.
+                                        */
+                                       result = KERN_PROTECTION_FAILURE;
+                                       break;
+#endif /* __APRR_SUPPORTED__*/
                                } else {
 #if CONFIG_EMBEDDED
                                        /*
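
On APRR hardware this makes an in-task remap of a MAP_JIT region fail outright rather than silently fall through. A user-space sketch of the behavior one would expect to observe; the existence of a JIT mapping at jit_addr is an assumption for illustration:

#include <assert.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

/* Assume jit_addr/jit_size describe an existing MAP_JIT mapping in this task. */
static void
try_remap_jit(mach_vm_address_t jit_addr, mach_vm_size_t jit_size)
{
	mach_vm_address_t target = 0;
	vm_prot_t cur_prot, max_prot;

	kern_return_t kr = mach_vm_remap(mach_task_self(), &target, jit_size, 0,
	    VM_FLAGS_ANYWHERE, mach_task_self(), jit_addr,
	    FALSE /* copy */, &cur_prot, &max_prot, VM_INHERIT_DEFAULT);

	/* On APRR devices the remap is now expected to be refused. */
	assert(kr == KERN_PROTECTION_FAILURE);
}
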
@@ -17808,7 +17838,7 @@ convert_port_entry_to_map(
                        if (ip_active(port) && (ip_kotype(port)
                            == IKOT_NAMED_ENTRY)) {
                                named_entry =
-                                   (vm_named_entry_t)port->ip_kobject;
+                                   (vm_named_entry_t) ip_get_kobject(port);
                                if (!(lck_mtx_try_lock(&(named_entry)->Lock))) {
                                        ip_unlock(port);
 
@@ -17866,7 +17896,7 @@ try_again:
                ip_lock(port);
                if (ip_active(port) &&
                    (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
-                       named_entry = (vm_named_entry_t)port->ip_kobject;
+                       named_entry = (vm_named_entry_t) ip_get_kobject(port);
                        if (!(lck_mtx_try_lock(&(named_entry)->Lock))) {
                                ip_unlock(port);
                                try_failed_count++;
@@ -18691,6 +18721,7 @@ again:
                }
        }
 
+       *shared_count = (unsigned int) ((dirty_shared_count * PAGE_SIZE_64) / (1024 * 1024ULL));
        if (evaluation_phase) {
                unsigned int shared_pages_threshold = (memorystatus_freeze_shared_mb_per_process_max * 1024 * 1024ULL) / PAGE_SIZE_64;
 
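
The megabyte conversion for *shared_count is now done unconditionally, before the evaluation_phase branch, instead of only on the KERN_SUCCESS path below. A quick worked example of the arithmetic with assumed values (16 KB pages, 512 dirty shared pages):

uint64_t dirty_shared_count = 512;                                   /* pages (assumed) */
uint64_t bytes = dirty_shared_count * PAGE_SIZE_64;                  /* 512 * 16384 = 8388608 */
unsigned int shared_mb = (unsigned int)(bytes / (1024 * 1024ULL));   /* 8 MB */
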
@@ -18723,7 +18754,6 @@ again:
                goto again;
        } else {
                kr = KERN_SUCCESS;
-               *shared_count = (unsigned int) ((dirty_shared_count * PAGE_SIZE_64) / (1024 * 1024ULL));
        }
 
 done: