+
+ /*
+  * NOTE(review): fragment of a larger UPL-construction loop — assumes
+  * the object lock is held and `delayed_unlock` tracks a deferred
+  * vm_page_unlock_queues(); confirm against the enclosing function.
+  */
+ if ( (cntrl_flags & UPL_NEED_32BIT_ADDR) &&
+ dst_page->phys_page >= (max_valid_dma_address >> PAGE_SHIFT) ) {
+ vm_page_t low_page;
+ int refmod;
+
+ /*
+ * support devices that can't DMA above 32 bits
+ * by substituting pages from a pool of low address
+ * memory for any pages we find above the 4G mark
+ * can't substitute if the page is already wired because
+ * we don't know whether that physical address has been
+ * handed out to some other 64 bit capable DMA device to use
+ */
+ if (dst_page->wire_count) {
+ ret = KERN_PROTECTION_FAILURE;
+ goto return_err;
+ }
+ /* drop the page-queues lock before a potentially blocking grab */
+ if (delayed_unlock) {
+ delayed_unlock = 0;
+ vm_page_unlock_queues();
+ }
+ low_page = vm_page_grablo();
+
+ if (low_page == VM_PAGE_NULL) {
+ ret = KERN_RESOURCE_SHORTAGE;
+ goto return_err;
+ }
+ /*
+ * from here until the vm_page_replace completes
+ * we mustn't drop the object lock... we don't
+ * want anyone refaulting this page in and using
+ * it after we disconnect it... we want the fault
+ * to find the new page being substituted.
+ */
+ refmod = pmap_disconnect(dst_page->phys_page);
+
+ vm_page_copy(dst_page, low_page);
+
+ /* carry over soft state, then merge in the hardware ref/mod
+  * bits that pmap_disconnect harvested from the old mappings */
+ low_page->reference = dst_page->reference;
+ low_page->dirty = dst_page->dirty;
+
+ if (refmod & VM_MEM_REFERENCED)
+ low_page->reference = TRUE;
+ if (refmod & VM_MEM_MODIFIED)
+ low_page->dirty = TRUE;
+
+ vm_page_lock_queues();
+ vm_page_replace(low_page, object, dst_offset);
+ /*
+ * keep the queue lock since we're going to
+ * need it immediately
+ */
+ delayed_unlock = 1;
+
+ /* continue processing with the substituted low page */
+ dst_page = low_page;
+ /*
+ * vm_page_grablo returned the page marked
+ * BUSY... we don't need a PAGE_WAKEUP_DONE
+ * here, because we've never dropped the object lock
+ */
+ dst_page->busy = FALSE;
+ }