+
+extern int uiomove64(addr64_t, int, void *);
+#define MAX_RUN 32
+
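+/*
+ * memory_object_control_uiomove:
+ *
+ * Move up to 'io_requested' bytes between 'uio' and the resident
+ * pages of the VM object named by 'control', starting 'start_offset'
+ * bytes into the page at 'offset'.  Only pages already present in
+ * the cache are touched; the first hole ends the transfer, leaving
+ * the caller to finish the I/O on the regular path.  When
+ * 'mark_dirty' is set, every page touched is marked dirty.
+ */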
+int
+memory_object_control_uiomove(
+ memory_object_control_t control,
+ memory_object_offset_t offset,
+ void * uio,
+ int start_offset,
+ int io_requested,
+ int mark_dirty)
+{
+ vm_object_t object;
+ vm_page_t dst_page;
+ int xsize;
+ int retval = 0;
+ int cur_run;
+ int cur_needed;
+ int i;
+ vm_page_t page_run[MAX_RUN];
+
+
+ object = memory_object_control_to_vm_object(control);
+ if (object == VM_OBJECT_NULL) {
+ return (0);
+ }
+ assert(!object->internal);
+
+ vm_object_lock(object);
+
+ if (mark_dirty && object->copy != VM_OBJECT_NULL) {
+ /*
+ * We can't modify the pages without honoring
+ * copy-on-write obligations first, so bail out
+ * of this optimized path and fall back to the
+ * regular path.
+ */
+ vm_object_unlock(object);
+ return (0);
+ }
+
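+ /*
+ * each pass through this loop gathers up to MAX_RUN
+ * resident pages, marks them BUSY, drops the object
+ * lock while the data is moved, then retakes the lock
+ * and wakes the pages back up
+ */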
+ while (io_requested && retval == 0) {
+
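+ /*
+ * how many pages the remainder of the request
+ * spans, rounding up for a partial page
+ */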
+ cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;
+
+ if (cur_needed > MAX_RUN)
+ cur_needed = MAX_RUN;
+
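+ /*
+ * collect as many consecutive resident pages as we can
+ */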
+ for (cur_run = 0; cur_run < cur_needed; ) {
+
+ if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
+ break;
+ /*
+ * Sync up on getting the busy bit
+ */
+ if (dst_page->busy || dst_page->cleaning) {
+ /*
+ * someone else is playing with this page... if we've
+ * already collected pages into this run, go ahead
+ * and process them now; we can't block on this page
+ * while holding other pages in the BUSY state.
+ * if the run is still empty, just wait for the page.
+ */
+ if (cur_run)
+ break;
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+ continue;
+ }
+ /*
+ * this routine is only called when copying
+ * to/from real files... no need to consider
+ * encrypted swap pages
+ */
+ assert(!dst_page->encrypted);
+
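+ /*
+ * take the busy bit so the page can't be freed or
+ * replaced once we drop the object lock; dirty it
+ * first if the caller is writing into it
+ */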
+ if (mark_dirty)
+ dst_page->dirty = TRUE;
+ dst_page->busy = TRUE;
+
+ page_run[cur_run++] = dst_page;
+
+ offset += PAGE_SIZE_64;
+ }
+ if (cur_run == 0) {
+ /*
+ * we hit a 'hole' in the cache...
+ * bail at this point; we'll unlock
+ * the object below
+ */
+ break;
+ }
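+ /*
+ * we can't hold the object lock across uiomove64 since
+ * it may fault... the BUSY pages can't disappear or be
+ * recycled out from under us in the meantime
+ */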
+ vm_object_unlock(object);
+
+ for (i = 0; i < cur_run; i++) {
+
+ dst_page = page_run[i];
+
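+ /*
+ * bytes to move out of this page: the rest of the
+ * page past start_offset, clipped to what's still
+ * requested
+ */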
+ if ((xsize = PAGE_SIZE - start_offset) > io_requested)
+ xsize = io_requested;
+
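+ /*
+ * phys_page is a physical page number, so shift it
+ * into a 64-bit physical address and add the offset
+ * within the page
+ */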
+ if ((retval = uiomove64(((addr64_t)dst_page->phys_page << 12) + start_offset,
+ xsize, uio)))
+ break;
+
+ io_requested -= xsize;
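+ /*
+ * only the first page of the run can start mid-page
+ */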
+ start_offset = 0;
+ }
+ vm_object_lock(object);
+
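+ /*
+ * with the object lock retaken, clear the busy bit
+ * on each page in the run and wake any waiters
+ */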
+ for (i = 0; i < cur_run; i++) {
+ dst_page = page_run[i];
+
+ PAGE_WAKEUP_DONE(dst_page);
+ }
+ }
+ vm_object_unlock(object);
+
+ return (retval);
+}
+
+