-        /* map this memory object in place of the current one */
-        map_addr = start;
-        kr = vm_map_enter_mem_object(map,
-                                     &map_addr,
-                                     end - start,
-                                     (mach_vm_offset_t) 0,
-                                     VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
-                                     (ipc_port_t) protected_mem_obj,
-                                     (map_entry->offset +
-                                      (start - map_entry->vme_start)),
-                                     TRUE,
-                                     map_entry->protection,
-                                     map_entry->max_protection,
-                                     map_entry->inheritance);
-        assert(map_addr == start);
-        /*
-         * Release the reference obtained by apple_protect_pager_setup().
-         * The mapping (if it succeeded) is now holding a reference on the
-         * memory object.
-         */
-        memory_object_deallocate(protected_mem_obj);
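+                /*
+                 * The code below runs once per map entry covered by
+                 * [start_aligned, end_aligned); the loop header is outside
+                 * this hunk, but see the "continue with next map entry"
+                 * step at the bottom.
+                 */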
+                /* limit the map entry to the area we want to cover */
+                vm_map_clip_start(map, map_entry, start_aligned);
+                vm_map_clip_end(map, map_entry, end_aligned);
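+                /*
+                 * Clipping splits the entry at the page-aligned boundaries,
+                 * so only the portion inside [start_aligned, end_aligned)
+                 * is replaced below; any neighboring portions keep their
+                 * original backing object.
+                 */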
+
+                tmp_entry = *map_entry;
+                map_entry = VM_MAP_ENTRY_NULL; /* not valid after unlocking map */
+                vm_map_unlock(map);
+                map_locked = FALSE;
+
+                /*
+                 * This map entry might be only partially encrypted
+                 * (if not fully "page-aligned").
+                 */
+                crypto_start = 0;
+                crypto_end = tmp_entry.vme_end - tmp_entry.vme_start;
+                if (tmp_entry.vme_start < start) {
+                        if (tmp_entry.vme_start != start_aligned) {
+                                kr = KERN_INVALID_ADDRESS;
+                        }
+                        crypto_start += (start - tmp_entry.vme_start);
+                }
+                if (tmp_entry.vme_end > end) {
+                        if (tmp_entry.vme_end != end_aligned) {
+                                kr = KERN_INVALID_ADDRESS;
+                        }
+                        crypto_end -= (tmp_entry.vme_end - end);
+                }
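+                /*
+                 * Worked example with made-up addresses, assuming 4K pages:
+                 * for an entry [0x1000, 0x5000) with start = 0x1800 and
+                 * end = 0x4800, start_aligned = 0x1000 and end_aligned =
+                 * 0x5000, so crypto_start = 0x800 and crypto_end = 0x3800:
+                 * the pager decrypts only bytes [0x800, 0x3800) of this
+                 * mapping.
+                 */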
+
+                /*
+                 * This "extra backing offset" is needed to get the decryption
+                 * routine to use the right key.  It adjusts for the possibly
+                 * relative offset of an interposed "4K" pager...
+                 */
+                if (crypto_backing_offset == (vm_object_offset_t) -1) {
+                        crypto_backing_offset = VME_OFFSET(&tmp_entry);
+                }
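+                /*
+                 * A caller passes -1 to mean "no extra adjustment": the
+                 * entry's own offset into its backing object is then used
+                 * as the key offset.
+                 */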
+
+                /*
+                 * Look up (and create if necessary) the protected memory
+                 * object matching that VM object.
+                 * If successful, this also grabs a reference on the memory
+                 * object, to guarantee that it doesn't go away before we get
+                 * a chance to map it.
+                 */
+                unprotected_mem_obj = apple_protect_pager_setup(
+                        protected_object,
+                        VME_OFFSET(&tmp_entry),
+                        crypto_backing_offset,
+                        crypt_info,
+                        crypto_start,
+                        crypto_end);
+
+                /* release extra ref on protected object */
+                vm_object_deallocate(protected_object);
+
+                if (unprotected_mem_obj == NULL) {
+                        kr = KERN_FAILURE;
+                        goto done;
+                }
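+                /*
+                 * Note that at this point the extra reference on
+                 * protected_object has already been dropped and the map is
+                 * unlocked (map_locked is FALSE), so the error path owes no
+                 * local cleanup beyond what "done" performs.
+                 */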
+
+                vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
+
+                /* map this memory object in place of the current one */
+                map_addr = tmp_entry.vme_start;
+                kr = vm_map_enter_mem_object(map,
+                                             &map_addr,
+                                             (tmp_entry.vme_end -
+                                              tmp_entry.vme_start),
+                                             (mach_vm_offset_t) 0,
+                                             vm_flags,
+                                             (ipc_port_t) unprotected_mem_obj,
+                                             0,
+                                             TRUE,
+                                             tmp_entry.protection,
+                                             tmp_entry.max_protection,
+                                             tmp_entry.inheritance);
+                assert(kr == KERN_SUCCESS);
+                assert(map_addr == tmp_entry.vme_start);
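+                /*
+                 * VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE replaces the clipped
+                 * entry in place at the same address.  The object offset is
+                 * 0 (rather than the old code's entry-relative offset)
+                 * because the crypto offsets were already handed to
+                 * apple_protect_pager_setup() above.  Also note that the
+                 * asserts compile away in release builds, so a failure here
+                 * would go unhandled.
+                 */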
+
+#if VM_MAP_DEBUG_APPLE_PROTECT
+                printf("APPLE_PROTECT: map %p [0x%llx:0x%llx] pager %p: "
+                       "backing:[object:%p,offset:0x%llx,"
+                       "crypto_backing_offset:0x%llx,"
+                       "crypto_start:0x%llx,crypto_end:0x%llx]\n",
+                       map,
+                       (uint64_t) map_addr,
+                       (uint64_t) (map_addr + (tmp_entry.vme_end -
+                                               tmp_entry.vme_start)),
+                       unprotected_mem_obj,
+                       protected_object,
+                       VME_OFFSET(&tmp_entry),
+                       crypto_backing_offset,
+                       crypto_start,
+                       crypto_end);
+#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
+
+                /*
+                 * Release the reference obtained by
+                 * apple_protect_pager_setup().
+                 * The mapping (if it succeeded) is now holding a reference on
+                 * the memory object.
+                 */
+                memory_object_deallocate(unprotected_mem_obj);
+                unprotected_mem_obj = MEMORY_OBJECT_NULL;
+
+                /* continue with next map entry */
+                crypto_backing_offset += (tmp_entry.vme_end -
+                                          tmp_entry.vme_start);
+                crypto_backing_offset -= crypto_start;
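+                /*
+                 * The subtraction appears to account for the caller's
+                 * crypto_backing_offset referring to "start", which sits
+                 * crypto_start bytes into the first entry; crypto_start is
+                 * zero for the fully covered entries that follow.
+                 */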
+        }
+        kr = KERN_SUCCESS;