#define VM_PROTECT_WX_FAIL 1
#endif /* CONFIG_EMBEDDED */
uint64_t vm_memory_malloc_no_cow_mask = 0ULL;
+#if DEBUG
+int vm_check_map_sanity = 0;
+#endif
/*
* vm_map_init:
&vm_memory_malloc_no_cow_mask,
sizeof(vm_memory_malloc_no_cow_mask));
}
+
+#if DEBUG
+ PE_parse_boot_argn("vm_check_map_sanity", &vm_check_map_sanity, sizeof(vm_check_map_sanity));
+ if (vm_check_map_sanity) {
+ kprintf("VM sanity checking enabled\n");
+ } else {
+ kprintf("VM sanity checking disabled. Set bootarg vm_check_map_sanity=1 to enable\n");
+ }
+#endif /* DEBUG */
}
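/*
 * A minimal sketch (hypothetical, for illustration only) of how the
 * vm_check_map_sanity boot-arg parsed above might gate a DEBUG-only
 * consistency pass over a map; vm_map_validate_layout() is an assumed
 * helper, not a real routine in this file.
 */
#if DEBUG
static void
vm_map_sanity_check_if_enabled(vm_map_t map)
{
	if (__improbable(vm_check_map_sanity)) {
		vm_map_validate_layout(map); /* assumed: walk entries, verify ordering and links */
	}
}
#endif /* DEBUG */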
void
result->map_disallow_data_exec = FALSE;
result->is_nested_map = FALSE;
result->map_disallow_new_exec = FALSE;
+ result->terminated = FALSE;
result->highest_entry_end = 0;
result->first_free = vm_map_to_entry(result);
result->hint = vm_map_to_entry(result);
} else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
vm_named_entry_t named_entry;
- named_entry = (vm_named_entry_t) port->ip_kobject;
+ named_entry = (vm_named_entry_t) ip_get_kobject(port);
if (flags & (VM_FLAGS_RETURN_DATA_ADDR |
VM_FLAGS_RETURN_4K_DATA_ADDR)) {
/*
* Since this is the first time the user is wiring this map entry, check to see if we're
* exceeding the user wire limits. There is a per map limit which is the smaller of either
- * the process's rlimit or the global vm_user_wire_limit which caps this value. There is also
+ * the process's rlimit or the global vm_per_task_user_wire_limit which caps this value. There is also
* a system-wide limit on the amount of memory all users can wire. If the user is over either
* limit, then we fail.
*/
- if (size + map->user_wire_size > MIN(map->user_wire_limit, vm_user_wire_limit) ||
- size + ptoa_64(total_wire_count) > vm_global_user_wire_limit ||
- size + ptoa_64(total_wire_count) > max_mem - vm_global_no_user_wire_amount) {
+ if (size + map->user_wire_size > MIN(map->user_wire_limit, vm_per_task_user_wire_limit) ||
+ size + ptoa_64(total_wire_count) > vm_global_user_wire_limit) {
return KERN_RESOURCE_SHORTAGE;
}
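/*
 * Stand-alone model of the check above (illustrative only; hypothetical
 * names, not kernel code): a wiring request is refused if it would push
 * the map's wired total past the smaller of the task's rlimit and
 * vm_per_task_user_wire_limit, or push the system-wide wired total past
 * vm_global_user_wire_limit.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
wire_request_allowed(uint64_t size, uint64_t map_wired_size,
    uint64_t rlimit, uint64_t per_task_limit,
    uint64_t global_wired_bytes, uint64_t global_limit)
{
	/* Per-map cap: the smaller of the task's rlimit and the global per-task limit. */
	uint64_t per_map_cap = (rlimit < per_task_limit) ? rlimit : per_task_limit;

	if (size + map_wired_size > per_map_cap) {
		return false; /* would exceed this task's cap */
	}
	if (size + global_wired_bytes > global_limit) {
		return false; /* would exceed the system-wide cap */
	}
	return true;
}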
const vm_map_offset_t FIND_GAP = 1; /* a not page aligned value */
const vm_map_offset_t GAPS_OK = 2; /* a different not page aligned value */
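/*
 * A map marked "terminated" is being emptied wholesale (see
 * vm_map_terminate() below), so holes in the requested range are
 * expected there and are not reported as gaps.
 */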
- if (map != kernel_map && !(flags & VM_MAP_REMOVE_GAPS_OK)) {
+ if (map != kernel_map && !(flags & VM_MAP_REMOVE_GAPS_OK) && !map->terminated) {
gap_start = FIND_GAP;
} else {
gap_start = GAPS_OK;
return KERN_SUCCESS;
}
+
+/*
+ * vm_map_terminate:
+ *
+ * Clean out a task's map.
+ */
+kern_return_t
+vm_map_terminate(
+ vm_map_t map)
+{
+ vm_map_lock(map);
+ map->terminated = TRUE;
+ vm_map_unlock(map);
+
+ return vm_map_remove(map,
+ map->min_offset,
+ map->max_offset,
+ /*
+ * Final cleanup:
+ * + no unnesting
+ * + remove immutable mappings
+ * + allow gaps in range
+ */
+ (VM_MAP_REMOVE_NO_UNNESTING |
+ VM_MAP_REMOVE_IMMUTABLE |
+ VM_MAP_REMOVE_GAPS_OK));
+}
+
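/*
 * Minimal sketch of an intended call site (hypothetical, for
 * illustration): a task-teardown path would empty the map with
 * vm_map_terminate() before dropping its own reference via
 * vm_map_deallocate().
 */
static kern_return_t
task_map_teardown_example(vm_map_t map)
{
	kern_return_t kr;

	kr = vm_map_terminate(map);  /* mark terminated, then remove all entries */
	vm_map_deallocate(map);      /* release the caller's map reference */
	return kr;
}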
/*
* vm_map_remove:
*
* Attempt non-blocking copy-on-write optimizations.
*/
- if (src_destroy &&
- (src_object == VM_OBJECT_NULL ||
- (src_object->internal &&
- src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
- src_entry->vme_start <= src_addr &&
- src_entry->vme_end >= src_end &&
- !map_share))) {
- /*
- * If we are destroying the source, and the object
- * is internal, we can move the object reference
- * from the source to the copy. The copy is
- * copy-on-write only if the source is.
- * We make another reference to the object, because
- * destroying the source entry will deallocate it.
- *
- * This memory transfer has to be atomic (to prevent
- * the VM object from being shared or copied while
- * it's being moved here), so we can only do this
- * if we won't have to unlock the VM map, i.e. the
- * entire range must be covered by this map entry.
- */
- vm_object_reference(src_object);
-
- /*
- * Copy is always unwired. vm_map_copy_entry
- * set its wired count to zero.
- */
-
- goto CopySuccessful;
- }
-
+ /*
+ * If we are destroying the source, and the object
+ * is internal, we could move the object reference
+ * from the source to the copy. The copy is
+ * copy-on-write only if the source is.
+ * We make another reference to the object, because
+ * destroying the source entry will deallocate it.
+ *
+ * This memory transfer has to be atomic (to prevent
+ * the VM object from being shared or copied while
+ * it's being moved here), so we could only do this
+ * if we won't have to unlock the VM map until the
+ * original mapping has been fully removed.
+ */
RestartCopy:
if ((src_object == VM_OBJECT_NULL ||
if (!copy) {
if (src_entry->used_for_jit == TRUE) {
if (same_map) {
+#if __APRR_SUPPORTED__
+ /*
+ * Disallow re-mapping of any JIT regions on APRR devices.
+ */
+ result = KERN_PROTECTION_FAILURE;
+ break;
+#endif /* __APRR_SUPPORTED__ */
} else {
#if CONFIG_EMBEDDED
/*
if (ip_active(port) && (ip_kotype(port)
== IKOT_NAMED_ENTRY)) {
named_entry =
- (vm_named_entry_t)port->ip_kobject;
+ (vm_named_entry_t) ip_get_kobject(port);
if (!(lck_mtx_try_lock(&(named_entry)->Lock))) {
ip_unlock(port);
ip_lock(port);
if (ip_active(port) &&
(ip_kotype(port) == IKOT_NAMED_ENTRY)) {
- named_entry = (vm_named_entry_t)port->ip_kobject;
+ named_entry = (vm_named_entry_t) ip_get_kobject(port);
if (!(lck_mtx_try_lock(&(named_entry)->Lock))) {
ip_unlock(port);
try_failed_count++;
}
}
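/*
 * Report the dirty shared footprint in MB regardless of which branch is
 * taken below; e.g. with 16KB pages, 2560 dirty shared pages ->
 * 2560 * 16384 / (1024 * 1024) = 40MB.
 */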
+ *shared_count = (unsigned int) ((dirty_shared_count * PAGE_SIZE_64) / (1024 * 1024ULL));
if (evaluation_phase) {
unsigned int shared_pages_threshold = (memorystatus_freeze_shared_mb_per_process_max * 1024 * 1024ULL) / PAGE_SIZE_64;
goto again;
} else {
kr = KERN_SUCCESS;
- *shared_count = (unsigned int) ((dirty_shared_count * PAGE_SIZE_64) / (1024 * 1024ULL));
}
done: