+ request_flags = UPL_NO_SYNC | UPL_RET_ONLY_ABSENT | UPL_SET_LITE;
+
+ if (cnt == (dp_size_t) -1)
+ reclaim_all = TRUE;
+
+ if (reclaim_all == TRUE) {
+ /*
+ * We've been called from ps_vstruct_reclaim() to move all
+ * the object's swapped pages back to VM pages.
+ * This can put memory pressure on the system, so we do want
+ * to wait for free pages, to avoid getting in the way of the
+ * vm_pageout_scan() thread.
+ * Let's not use UPL_NOBLOCK in this case.
+ */
+ vs_offset &= ~cl_mask;
+ i = pages_in_cl;
+ } else {
+ i = 1;
+
+ /*
+ * if the I/O cluster size == PAGE_SIZE, we don't want to set
+ * the UPL_NOBLOCK since we may be trying to recover from a
+ * previous partial pagein I/O that occurred because we were low
+ * on memory and bailed early in order to honor the UPL_NOBLOCK...
+ * since we're only asking for a single page, we can block w/o fear
+ * of tying up pages while waiting for more to become available
+ */
+ if (fault_info == NULL || ((vm_object_fault_info_t)fault_info)->cluster_size > PAGE_SIZE)
+ request_flags |= UPL_NOBLOCK;
+ }
+
+again:
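+ /* index of the requested page within its backing-store cluster */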
+ cl_index = (vs_offset & cl_mask) / vm_page_size;
+
+ if ((ps_clmap(vs, vs_offset & ~cl_mask, &clmap, CL_FIND, 0, 0) == (dp_offset_t)-1) ||
+ !CLMAP_ISSET(clmap, cl_index)) {
+ /*
+ * the needed page doesn't exist in the backing store...
+ * we don't want to try to do any I/O, just abort the
+ * page and let the fault handler provide a zero-fill
+ */
+ if (cnt == 0) {
+ /*
+ * The caller was just poking at us to see if
+ * the page has been paged out. No need to
+ * mess with the page at all.
+ * Just let the caller know we don't have that page.
+ */
+ return KERN_FAILURE;
+ }
+ if (reclaim_all == TRUE) {
+ i--;
+ if (i == 0) {
+ /* no more pages in this cluster */
+ return KERN_FAILURE;
+ }
+ /* try the next page in this cluster */
+ vs_offset += vm_page_size;
+ goto again;
+ }
+
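+ /*
+ * build a throwaway single-page UPL and abort it: UPL_ABORT_ERROR
+ * if the cluster map reported an error, UPL_ABORT_UNAVAILABLE
+ * otherwise, so the fault handler will zero-fill the page
+ */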
+ page_list_count = 0;
+
+ memory_object_super_upl_request(vs->vs_control, (memory_object_offset_t)vs_offset,
+ PAGE_SIZE, PAGE_SIZE,
+ &upl, NULL, &page_list_count,
+ request_flags | UPL_SET_INTERNAL);
+ upl_range_needed(upl, 0, 1);
+
+ if (clmap.cl_error)
+ upl_abort(upl, UPL_ABORT_ERROR);
+ else
+ upl_abort(upl, UPL_ABORT_UNAVAILABLE);
+ upl_deallocate(upl);
+
+ return KERN_SUCCESS;
+ }
+
+ if (cnt == 0) {
+ /*
+ * The caller was just poking at us to see if
+ * the page has been paged out. No need to
+ * mess with the page at all.
+ * Just let the caller know we do have that page.
+ */
+ return KERN_SUCCESS;
+ }
+
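+ /*
+ * honor a caller's request for synchronous I/O; when built with
+ * RECLAIM_SWAP we force synchronous I/O unconditionally.
+ * fault_info is NULL on the ps_vstruct_reclaim() path, so check
+ * it before dereferencing.
+ */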
+ if (fault_info != NULL && ((vm_object_fault_info_t)fault_info)->io_sync == TRUE) {
+ io_sync = TRUE;
+ } else {
+#if RECLAIM_SWAP
+ io_sync = TRUE;
+#endif /* RECLAIM_SWAP */
+ }
+
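+ /* issue synchronous requests with UPL_IOSYNC and skip the automatic commit (UPL_NOCOMMIT) */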
+ if (io_sync == TRUE) {
+
+ io_flags |= UPL_IOSYNC | UPL_NOCOMMIT;
+#if USE_PRECIOUS
+ request_flags |= UPL_PRECIOUS | UPL_CLEAN_IN_PLACE;
+#else /* USE_PRECIOUS */
+ request_flags |= UPL_REQUEST_SET_DIRTY;
+#endif /* USE_PRECIOUS */
+ }
+
+ assert(dp_encryption_inited);
+ if (dp_encryption) {
+ /*
+ * ENCRYPTED SWAP:
+ * request that the UPL be prepared for
+ * decryption.
+ */
+ request_flags |= UPL_ENCRYPT;
+ io_flags |= UPL_PAGING_ENCRYPTED;
+ }
+ orig_vs_offset = vs_offset;
+
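+ /*
+ * start with the largest speculative request we support;
+ * memory_object_cluster_size() below may trim it
+ */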
+ assert(cnt != 0);
+ cnt = VM_SUPER_CLUSTER;
+ cluster_start = (memory_object_offset_t) vs_offset;
+ cluster_length = (vm_size_t) cnt;
+ io_streaming = 0;
+
+ /*
+ * determine how big a speculative I/O we should try for...
+ */
+ if (memory_object_cluster_size(vs->vs_control, &cluster_start, &cluster_length, &io_streaming, (memory_object_fault_info_t)fault_info) == KERN_SUCCESS) {
+ assert(vs_offset >= (dp_offset_t) cluster_start &&
+ vs_offset < (dp_offset_t) (cluster_start + cluster_length));
+ vs_offset = (dp_offset_t) cluster_start;
+ cnt = (dp_size_t) cluster_length;
+ } else {
+ cluster_length = PAGE_SIZE;
+ cnt = PAGE_SIZE;
+ }
+
+ if (io_streaming)
+ io_flags |= UPL_IOSTREAMING;
+
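+ /* note the cluster boundaries we settled on */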
+ last_start = cluster_start;
+ last_length = cluster_length;
+