vm_prot_t max_protection,
vm_inherit_t inheritance)
{
+ kern_return_t kr;
+ vm_map_offset_t vmmaddr;
+
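+ /* operate on a local vm_map_offset_t copy of the requested address; the result is written back to the caller below */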
+ vmmaddr = (vm_map_offset_t) *address;
+
/* filter out any kernel-only flags */
if (flags & ~VM_FLAGS_USER_MAP)
return KERN_INVALID_ARGUMENT;
- return vm_map_enter_mem_object(target_map,
- address,
+ kr = vm_map_enter_mem_object(target_map,
+ &vmmaddr,
initial_size,
mask,
flags,
port,
offset,
copy,
cur_protection,
max_protection,
inheritance);
+
+ *address = vmmaddr;
+ return kr;
}
vm_prot_t original_protections, mask_protections;
unsigned int wimg_mode;
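+ /* set when the range is clipped below and must get its own shadow object */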
+ boolean_t force_shadow = FALSE;
+
if (((permission & 0x00FF0000) &
~(MAP_MEM_ONLY |
MAP_MEM_NAMED_CREATE |
} else if (access == MAP_MEM_COPYBACK) {
SET_MAP_MEM(access, parent_entry->protection);
wimg_mode = VM_WIMG_USE_DEFAULT;
+ } else if (access == MAP_MEM_INNERWBACK) {
+ SET_MAP_MEM(access, parent_entry->protection);
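+ /* inner write-back cache attribute */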
+ wimg_mode = VM_WIMG_INNERWBACK;
} else if (access == MAP_MEM_WTHRU) {
SET_MAP_MEM(access, parent_entry->protection);
wimg_mode = VM_WIMG_WTHRU;
wimg_mode = VM_WIMG_IO;
} else if (access == MAP_MEM_COPYBACK) {
wimg_mode = VM_WIMG_USE_DEFAULT;
+ } else if (access == MAP_MEM_INNERWBACK) {
+ wimg_mode = VM_WIMG_INNERWBACK;
} else if (access == MAP_MEM_WTHRU) {
wimg_mode = VM_WIMG_WTHRU;
} else if (access == MAP_MEM_WCOMB) {
*/
protections &= next_entry->max_protection;
}
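+ /* stop if a later entry is wired but the first entry is not */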
+ if ((next_entry->wired_count) &&
+ (map_entry->wired_count == 0)) {
+ break;
+ }
if(((next_entry->max_protection)
& protections) != protections) {
break;
}
}
+#if !CONFIG_EMBEDDED
+ if (vm_map_entry_should_cow_for_true_share(map_entry) &&
+ object->vo_size > map_size &&
+ map_size != 0) {
+ /*
+ * Set up the targeted range for copy-on-write to
+ * limit the impact of "true_share"/"copy_delay" to
+ * that range instead of the entire VM object...
+ */
+
+ vm_object_unlock(object);
+ if (vm_map_lock_read_to_write(target_map)) {
+ vm_object_deallocate(object);
+ target_map = original_map;
+ goto redo_lookup;
+ }
+
+ vm_map_clip_start(target_map, map_entry, vm_map_trunc_page(offset));
+ vm_map_clip_end(target_map, map_entry, vm_map_round_page(offset) + map_size);
+ force_shadow = TRUE;
+
+ map_size = map_entry->vme_end - map_entry->vme_start;
+ total_size = map_size;
+
+ vm_map_lock_write_to_read(target_map);
+ vm_object_lock(object);
+ }
+#endif /* !CONFIG_EMBEDDED */
+
if(object->internal) {
/* vm_map_lookup_locked will create a shadow if */
/* needs_copy is set but does not check for the */
/* set up an object which will not be pulled from */
/* under us. */
- if ((map_entry->needs_copy || object->shadowed ||
- (object->vo_size > total_size))
- && !object->true_share) {
+ if (force_shadow ||
+ ((map_entry->needs_copy ||
+ object->shadowed ||
+ (object->vo_size > total_size)) &&
+ !object->true_share)) {
/*
* We have to unlock the VM object before
* trying to upgrade the VM map lock, to
object, map_entry->offset,
total_size,
((map_entry->is_shared
- || target_map->mapped)
+ || target_map->mapped_in_other_pmaps)
? PMAP_NULL :
target_map->pmap),
map_entry->vme_start,
vm_object_lock(shadow_object);
while (total_size) {
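+ /* entries past the first may only be wired if the first entry is also wired */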
+ assert((next_entry->wired_count == 0) ||
+ (map_entry->wired_count));
+
if(next_entry->object.vm_object == object) {
vm_object_reference_locked(shadow_object);
next_entry->object.vm_object
wimg_mode = VM_WIMG_IO;
} else if (access == MAP_MEM_COPYBACK) {
wimg_mode = VM_WIMG_USE_DEFAULT;
+ } else if (access == MAP_MEM_INNERWBACK) {
+ wimg_mode = VM_WIMG_INNERWBACK;
} else if (access == MAP_MEM_WTHRU) {
wimg_mode = VM_WIMG_WTHRU;
} else if (access == MAP_MEM_WCOMB) {
assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
named_entry = (vm_named_entry_t)port->ip_kobject;
- lck_mtx_lock(&(named_entry)->Lock);
+
+ named_entry_lock(named_entry);
named_entry->ref_count -= 1;
+
if(named_entry->ref_count == 0) {
if (named_entry->is_sub_map) {
vm_map_deallocate(named_entry->backing.map);
vm_object_deallocate(named_entry->backing.object);
} /* else JMM - need to drop reference on pager in that case */
- lck_mtx_unlock(&(named_entry)->Lock);
+ named_entry_unlock(named_entry);
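+ /* last reference dropped: destroy the embedded lock before freeing the entry */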
+ named_entry_lock_destroy(named_entry);
kfree((void *) port->ip_kobject,
sizeof (struct vm_named_entry));
} else
- lck_mtx_unlock(&(named_entry)->Lock);
+ named_entry_unlock(named_entry);
}
/* Allow manipulation of individual page state. This is actually part of */