+
+#if !CONFIG_EMBEDDED
+ /*
+ * NOTE(review): diff hunk from the interior of a larger VM-map routine.
+ * "map", "entry", "offset", "upl_size", and the REDISCOVER_ENTRY label
+ * are all defined by the enclosing function, outside this hunk.
+ * Entered holding the map lock for read (see the upgrade call below).
+ */
+ local_object = entry->object.vm_object;
+ if (vm_map_entry_should_cow_for_true_share(entry) &&
+ local_object->vo_size > *upl_size &&
+ *upl_size != 0) {
+ vm_prot_t prot;
+
+ /*
+ * Set up the targeted range for copy-on-write to avoid
+ * applying true_share/copy_delay to the entire object.
+ */
+
+ /*
+ * Upgrade the map lock from read to write. On failure the entry
+ * may no longer be valid, so restart the lookup at the label
+ * (presumably the upgrade drops the lock on failure — confirm
+ * against vm_map_lock_read_to_write's contract).
+ */
+ if (vm_map_lock_read_to_write(map)) {
+ goto REDISCOVER_ENTRY;
+ }
+
+ /* Clip the map entry down to just the page-aligned target range. */
+ vm_map_clip_start(map, entry, vm_map_trunc_page(offset));
+ vm_map_clip_end(map, entry, vm_map_round_page(offset + *upl_size));
+ /*
+ * Strip write permission so the next write faults and triggers the
+ * copy; re-grant execute when this map overrides NX and any
+ * permission remains.
+ */
+ prot = entry->protection & ~VM_PROT_WRITE;
+ if (override_nx(map, entry->alias) && prot)
+ prot |= VM_PROT_EXECUTE;
+ /*
+ * Write-protect existing hardware mappings of the clipped range.
+ * When the entry is shared or the map itself is mapped elsewhere,
+ * pass PMAP_NULL instead of this map's pmap — presumably so the
+ * protection is applied through the object to every pmap mapping
+ * it, not just this one; verify against vm_object_pmap_protect.
+ */
+ vm_object_pmap_protect(local_object,
+ entry->offset,
+ entry->vme_end - entry->vme_start,
+ ((entry->is_shared || map->mapped)
+ ? PMAP_NULL
+ : map->pmap),
+ entry->vme_start,
+ prot);
+ /* Mark the entry so a later write fault performs the COW copy. */
+ entry->needs_copy = TRUE;
+
+ /* Done mutating the map; downgrade back to the read lock. */
+ vm_map_lock_write_to_read(map);
+ }
+#endif /* !CONFIG_EMBEDDED */
+