+ /*
+ * The new mapping failed. Attempt to restore
+ * the old mappings, saved in the "zap_old_map".
+ */
+ if (!map_locked) {
+ vm_map_lock(map);
+ map_locked = TRUE;
+ }
+
+ /* first check that the coast is still clear, i.e. the old range has not been re-mapped */
+ start = vm_map_first_entry(zap_old_map)->vme_start;
+ end = vm_map_last_entry(zap_old_map)->vme_end;
+ if (vm_map_lookup_entry(map, start, &entry1) ||
+ vm_map_lookup_entry(map, end, &entry2) ||
+ entry1 != entry2) {
+ /*
+ * Part of that range has already been
+ * re-mapped: we can't restore the old
+ * mappings...
+ */
+ vm_map_enter_restore_failures++;
+ } else {
+ /*
+ * Transfer the saved map entries from
+ * "zap_old_map" to the original "map",
+ * inserting them all after "entry1".
+ */
+ for (entry2 = vm_map_first_entry(zap_old_map);
+ entry2 != vm_map_to_entry(zap_old_map);
+ entry2 = vm_map_first_entry(zap_old_map)) {
+ vm_map_entry_unlink(zap_old_map,
+ entry2);
+ vm_map_entry_link(map, entry1, entry2);
+ entry1 = entry2;
+ }
+ if (map->wiring_required) {
+ /*
+ * XXX TODO: we should rewire the
+ * old pages here...
+ */
+ }
+ vm_map_enter_restore_successes++;
+ }
+ }
+ }
+
+ if (map_locked) {
+ vm_map_unlock(map);
+ }
+
+ /*
+ * Get rid of the "zap_maps" and all the map entries that
+ * they may still contain.
+ */
+ if (zap_old_map != VM_MAP_NULL) {
+ vm_map_destroy(zap_old_map);
+ zap_old_map = VM_MAP_NULL;
+ }
+ if (zap_new_map != VM_MAP_NULL) {
+ vm_map_destroy(zap_new_map);
+ zap_new_map = VM_MAP_NULL;
+ }
+
+ return result;
+
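+/* RETURN() is a macro local to vm_map_enter; undefine it so it does not leak past this function. */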
+#undef RETURN
+}
+
+
+#if VM_CPM
+
+#ifdef MACH_ASSERT
+extern pmap_paddr_t avail_start, avail_end;
+#endif
+
+/*
+ * Allocate memory in the specified map, with the caveat that
+ * the memory is physically contiguous. This call may fail
+ * if the system can't find sufficient contiguous memory.
+ * This call may cause heart-stopping amounts of paging
+ * activity.
+ *
+ * Memory obtained from this call should be freed in the
+ * normal way, viz., via vm_deallocate.
+ */
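+/*
+ * Usage sketch (illustrative only; assumes VM_CPM is configured and a
+ * hypothetical in-kernel caller working against kernel_map):
+ *
+ *	vm_map_offset_t buf = 0;
+ *	kern_return_t kr;
+ *
+ *	kr = vm_map_enter_cpm(kernel_map, &buf, 8 * PAGE_SIZE,
+ *			      VM_FLAGS_ANYWHERE);
+ *	if (kr == KERN_SUCCESS) {
+ *		... use the physically contiguous buffer at "buf" ...
+ *		(void) vm_deallocate(kernel_map, buf, 8 * PAGE_SIZE);
+ *	}
+ */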
+kern_return_t
+vm_map_enter_cpm(
+ vm_map_t map,
+ vm_map_offset_t *addr,
+ vm_map_size_t size,
+ int flags)
+{
+ vm_object_t cpm_obj;
+ pmap_t pmap;
+ vm_page_t m, pages;
+ kern_return_t kr;
+ vm_map_offset_t va, start, end, offset;
+#if MACH_ASSERT
+ vm_map_offset_t prev_addr;
+#endif /* MACH_ASSERT */
+
+ boolean_t anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
+
+ if (!vm_allocate_cpm_enabled)
+ return KERN_FAILURE;
+
+ if (size == 0) {
+ *addr = 0;
+ return KERN_SUCCESS;
+ }
+
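+ /*
+ * Normalize the request: ANYWHERE allocations start their search
+ * at the bottom of the map, explicit addresses are truncated to a
+ * page boundary, and the size is rounded up to whole pages.
+ */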
+ if (anywhere)
+ *addr = vm_map_min(map);
+ else
+ *addr = vm_map_trunc_page(*addr);
+ size = vm_map_round_page(size);
+
+ /*
+ * LP64todo - cpm_allocate should probably allow
+ * allocations of >4GB, but not with the current
+ * algorithm, so just cast down the size for now.
+ */
+ if (size > VM_MAX_ADDRESS)
+ return KERN_RESOURCE_SHORTAGE;
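+ /*
+ * Grab the physically contiguous pages up front; cpm_allocate
+ * returns them wired, chained together through NEXT_PAGE().
+ */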
+ if ((kr = cpm_allocate(CAST_DOWN(vm_size_t, size),
+ &pages, TRUE)) != KERN_SUCCESS)
+ return kr;
+
+ cpm_obj = vm_object_allocate((vm_object_size_t)size);
+ assert(cpm_obj != VM_OBJECT_NULL);
+ assert(cpm_obj->internal);
+ assert(cpm_obj->size == (vm_object_size_t)size);
+ assert(cpm_obj->can_persist == FALSE);
+ assert(cpm_obj->pager_created == FALSE);
+ assert(cpm_obj->pageout == FALSE);
+ assert(cpm_obj->shadow == VM_OBJECT_NULL);
+
+ /*
+ * Insert pages into object.
+ */
+
+ vm_object_lock(cpm_obj);
+ for (offset = 0; offset < size; offset += PAGE_SIZE) {
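+ /* pop the next page off the contiguous chain and sever its link */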
+ m = pages;
+ pages = NEXT_PAGE(m);
+ *(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
+
+ assert(!m->gobbled);
+ assert(!m->wanted);
+ assert(!m->pageout);
+ assert(!m->tabled);
+ /*
+ * ENCRYPTED SWAP:
+ * "m" is not supposed to be pageable, so it
+ * should not be encrypted. It wouldn't be safe
+ * to enter it in a new VM object while encrypted.
+ */
+ ASSERT_PAGE_DECRYPTED(m);
+ assert(m->busy);
+ assert(m->phys_page >= (avail_start >> PAGE_SHIFT) &&
+ m->phys_page <= (avail_end >> PAGE_SHIFT));
+
+ m->busy = FALSE;
+ vm_page_insert(m, cpm_obj, offset);
+ }
+ assert(cpm_obj->resident_page_count == size / PAGE_SIZE);
+ vm_object_unlock(cpm_obj);
+
+ /*
+ * Hang onto a reference on the object in case a
+ * multi-threaded application for some reason decides
+ * to deallocate the portion of the address space into
+ * which we will insert this object.
+ *
+ * Unfortunately, we must insert the object now before
+ * we can talk to the pmap module about which addresses
+ * must be wired down. Hence, the race with a multi-
+ * threaded app.
+ */
+ vm_object_reference(cpm_obj);
+
+ /*
+ * Insert object into map.
+ */
+
+ kr = vm_map_enter(
+ map,
+ addr,
+ size,
+ (vm_map_offset_t)0,
+ flags,
+ cpm_obj,
+ (vm_object_offset_t)0,
+ FALSE,
+ VM_PROT_ALL,
+ VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+
+ if (kr != KERN_SUCCESS) {
+ /*
+ * A CPM object doesn't have can_persist set,
+ * so all we have to do is deallocate it to
+ * free up these pages.
+ */
+ assert(cpm_obj->pager_created == FALSE);
+ assert(cpm_obj->can_persist == FALSE);
+ assert(cpm_obj->pageout == FALSE);
+ assert(cpm_obj->shadow == VM_OBJECT_NULL);
+ vm_object_deallocate(cpm_obj); /* kill acquired ref */
+ vm_object_deallocate(cpm_obj); /* kill creation ref */
+ /* both references are gone: don't touch cpm_obj or the map range below */
+ return kr;
+ }
+
+ /*
+ * Inform the physical mapping system that the
+ * range of addresses may not fault, so that
+ * page tables and such can be locked down as well.
+ */
+ start = *addr;
+ end = start + size;
+ pmap = vm_map_pmap(map);
+ pmap_pageable(pmap, start, end, FALSE);
+
+ /*
+ * Enter each page into the pmap, to avoid faults.
+ * Note that this loop could be coded more efficiently,
+ * if the need arose, rather than looking up each page
+ * again.
+ */
+ for (offset = 0, va = start; offset < size;
+ va += PAGE_SIZE, offset += PAGE_SIZE) {
+ vm_object_lock(cpm_obj);
+ m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
+ vm_object_unlock(cpm_obj);
+ assert(m != VM_PAGE_NULL);
+ PMAP_ENTER(pmap, va, m, VM_PROT_ALL,
+ ((unsigned int)(m->object->wimg_bits)) & VM_WIMG_MASK,
+ TRUE);
+ }
+
+#if MACH_ASSERT
+ /*
+ * Verify that the pages were entered in order and that the
+ * allocation is physically contiguous.
+ */
+ for (offset = 0; offset < size; offset += PAGE_SIZE) {
+ vm_object_lock(cpm_obj);
+ m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
+ vm_object_unlock(cpm_obj);
+ if (m == VM_PAGE_NULL)
+ panic("vm_allocate_cpm: obj 0x%x off 0x%x no page",
+ cpm_obj, offset);
+ assert(m->tabled);
+ assert(!m->busy);
+ assert(!m->wanted);
+ assert(!m->fictitious);
+ assert(!m->private);
+ assert(!m->absent);
+ assert(!m->error);
+ assert(!m->cleaning);
+ assert(!m->precious);
+ assert(!m->clustered);
+ if (offset != 0) {
+ if (m->phys_page != prev_addr + 1) {
+ printf("start 0x%x end 0x%x va 0x%x\n",
+ start, end, va);
+ printf("obj 0x%x off 0x%x\n", cpm_obj, offset);
+ printf("m 0x%x prev_address 0x%x\n", m,
+ prev_addr);
+ panic("vm_allocate_cpm: pages not contig!");
+ }
+ }
+ prev_addr = m->phys_page;
+ }
+#endif /* MACH_ASSERT */
+
+ vm_object_deallocate(cpm_obj); /* kill extra ref */
+
+ return kr;
+}
+
+
+#else /* VM_CPM */
+
+/*
+ * Interface is defined in all cases, but unless the kernel
+ * is built explicitly for this option, the interface does
+ * nothing.
+ */
+
+kern_return_t
+vm_map_enter_cpm(
+ __unused vm_map_t map,
+ __unused vm_map_offset_t *addr,
+ __unused vm_map_size_t size,
+ __unused int flags)
+{
+ return KERN_FAILURE;
+}
+#endif /* VM_CPM */
+
+/*
+ * vm_map_clip_start: [ internal use only ]
+ *
+ * Asserts that the given entry begins at or after
+ * the specified address; if necessary,
+ * it splits the entry into two.
+ */
+#ifndef NO_NESTED_PMAP
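+/*
+ * Note: if the clip point falls inside the entry, any nested sub-pmap
+ * covering it is unnested first, and a physically contiguous object
+ * that was entered directly into the pmap is removed, so that the two
+ * halves of the split start from a clean pmap state.
+ */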
+#define vm_map_clip_start(map, entry, startaddr) \
+MACRO_BEGIN \
+ vm_map_t VMCS_map; \
+ vm_map_entry_t VMCS_entry; \
+ vm_map_offset_t VMCS_startaddr; \
+ VMCS_map = (map); \
+ VMCS_entry = (entry); \
+ VMCS_startaddr = (startaddr); \
+ if (VMCS_startaddr > VMCS_entry->vme_start) { \
+ if(entry->use_pmap) { \
+ vm_map_offset_t pmap_base_addr; \
+ \
+ pmap_base_addr = 0xF0000000 & entry->vme_start; \
+ pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
+ entry->use_pmap = FALSE; \
+ } else if(entry->object.vm_object \
+ && !entry->is_sub_map \
+ && entry->object.vm_object->phys_contiguous) { \
+ pmap_remove(map->pmap, \
+ (addr64_t)(entry->vme_start), \
+ (addr64_t)(entry->vme_end)); \
+ } \
+ _vm_map_clip_start(&VMCS_map->hdr, VMCS_entry, VMCS_startaddr); \
+ } \
+ UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \
+MACRO_END
+#else /* NO_NESTED_PMAP */
+#define vm_map_clip_start(map, entry, startaddr) \
+MACRO_BEGIN \