- if (object->internal) {
- /*
-  * vm_map_lookup_locked will create a shadow if
-  * needs_copy is set but does not check for the
-  * other two conditions shown. It is important to
-  * set up an object which will not be pulled out
-  * from under us.
-  */
-
- if (force_shadow ||
- ((map_entry->needs_copy ||
- object->shadowed ||
- (object->vo_size > total_size &&
- (VME_OFFSET(map_entry) != 0 ||
- object->vo_size >
- vm_map_round_page(total_size,
- VM_MAP_PAGE_MASK(target_map)))))
- && !object->true_share)) {
- /*
- * We have to unlock the VM object before
- * trying to upgrade the VM map lock, to
- * honor lock ordering (map then object).
- * Otherwise, we would deadlock if another
- * thread holds a read lock on the VM map and
- * is trying to acquire the VM object's lock.
- * We still hold an extra reference on the
- * VM object, guaranteeing that it won't
- * disappear.
- */
- vm_object_unlock(object);
-
- if (vm_map_lock_read_to_write(target_map)) {
- /*
- * We couldn't upgrade our VM map lock
- * from "read" to "write" and we lost
- * our "read" lock.
- * Start all over again...
- */
- vm_object_deallocate(object); /* extra ref */
- target_map = original_map;
- goto redo_lookup;
- }
-#if 0
- vm_object_lock(object);
-#endif
-
- /*
- * JMM - We need to avoid coming here when the object
- * is wired by anybody, not just the current map. Why
- * couldn't we use the standard vm_object_copy_quickly()
- * approach here?
- */
-
- /* create a shadow object */
- VME_OBJECT_SHADOW(map_entry, total_size);
- shadow_object = VME_OBJECT(map_entry);
-#if 0
- vm_object_unlock(object);
-#endif
-
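- /*
-  * Downgrade the protection to read-only so that any
-  * future write will fault and be redirected to the new
-  * shadow object; re-enable execute where NX is
-  * overridden for this mapping.
-  */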
- prot = map_entry->protection & ~VM_PROT_WRITE;
-
- if (override_nx(target_map,
- VME_ALIAS(map_entry))
- && prot)
- prot |= VM_PROT_EXECUTE;
-
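- /*
-  * Apply the reduced protection to the existing hardware
-  * mappings of the original object. Passing PMAP_NULL
-  * downgrades every pmap that may map these pages, which
-  * is needed when the entry is shared or the map is
-  * mapped in other pmaps.
-  */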
- vm_object_pmap_protect(
- object, VME_OFFSET(map_entry),
- total_size,
- ((map_entry->is_shared
- || target_map->mapped_in_other_pmaps)
- ? PMAP_NULL :
- target_map->pmap),
- map_entry->vme_start,
- prot);
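- /*
-  * map_entry now maps the shadow object; account for its
-  * size and clear needs_copy before fixing up any other
-  * entries backed by the original object.
-  */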
- total_size -= (map_entry->vme_end
- - map_entry->vme_start);
- next_entry = map_entry->vme_next;
- map_entry->needs_copy = FALSE;
-
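- /*
-  * The shadowed range may span several consecutive map
-  * entries backed by the same object. Redirect each of
-  * them to the shadow object and recompute its offset so
-  * that the entries remain contiguous within the shadow.
-  */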
- vm_object_lock(shadow_object);
- while (total_size) {
- assert((next_entry->wired_count == 0) ||
- (map_entry->wired_count));
-
- if (VME_OBJECT(next_entry) == object) {
- vm_object_reference_locked(shadow_object);
- VME_OBJECT_SET(next_entry,
- shadow_object);
- vm_object_deallocate(object);
- VME_OFFSET_SET(
- next_entry,
- (VME_OFFSET(next_entry->vme_prev) +
- (next_entry->vme_prev->vme_end
- - next_entry->vme_prev->vme_start)));
- next_entry->needs_copy = FALSE;
- } else {
- panic("mach_make_memory_entry_64:"
- " map entries out of sync\n");
- }
- total_size -=
- next_entry->vme_end
- - next_entry->vme_start;
- next_entry = next_entry->vme_next;
- }
-
- /*
- * Transfer our extra reference to the
- * shadow object.
- */
- vm_object_reference_locked(shadow_object);
- vm_object_deallocate(object); /* extra ref */
- object = shadow_object;
-
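- /*
-  * Recompute the offset of the requested address within
-  * the object (now the shadow) backing this mapping.
-  */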
- obj_off = ((local_offset - map_entry->vme_start)
- + VME_OFFSET(map_entry));
-
- vm_map_lock_write_to_read(target_map);
- }
- }
-
- /*
-  * Note: in the future we can (if necessary) allow for
-  * memory object lists; this will better support
-  * fragmentation, but is it necessary? The user should
-  * be encouraged to create address-space-oriented shared
-  * objects from CLEAN memory regions which have a known
-  * and defined history, i.e. no inheritance sharing: make
-  * this call before making the region the target of IPCs,
-  * etc. The code above, protecting against delayed copy,
-  * etc., is mostly defensive.
-  */
-
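- /*
-  * If the object allows its physical cache attributes to
-  * be overridden (!nophyscache), derive the WIMG cache
-  * mode from the requested access type; otherwise keep
-  * the object's current wimg_bits.
-  */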
- wimg_mode = object->wimg_bits;
- if (!(object->nophyscache)) {
- if (access == MAP_MEM_IO) {
- wimg_mode = VM_WIMG_IO;
- } else if (access == MAP_MEM_COPYBACK) {
- wimg_mode = VM_WIMG_USE_DEFAULT;
- } else if (access == MAP_MEM_INNERWBACK) {
- wimg_mode = VM_WIMG_INNERWBACK;
- } else if (access == MAP_MEM_WTHRU) {
- wimg_mode = VM_WIMG_WTHRU;
- } else if (access == MAP_MEM_WCOMB) {
- wimg_mode = VM_WIMG_WCOMB;
- }
- }
-
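- /*
-  * Debugging aid: when object tracking is enabled, record
-  * a backtrace for this object's first transition to
-  * "true_share".
-  */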
-#if VM_OBJECT_TRACKING_OP_TRUESHARE
- if (!object->true_share &&
- vm_object_tracking_inited) {
- void *bt[VM_OBJECT_TRACKING_BTDEPTH];
- int num = 0;
-
- num = OSBacktrace(bt,
- VM_OBJECT_TRACKING_BTDEPTH);
- btlog_add_entry(vm_object_tracking_btlog,
- object,
- VM_OBJECT_TRACKING_OP_TRUESHARE,
- bt,
- num);