diff --git a/osfmk/default_pager/dp_backing_store.c b/osfmk/default_pager/dp_backing_store.c
index e771c10f38f6971bfcf09871c2b67bf383de24ae..8d01636258de3f8efe96d75de538b4a36c4f6fa3 100644
--- a/osfmk/default_pager/dp_backing_store.c
+++ b/osfmk/default_pager/dp_backing_store.c
@@ -1,22 +1,24 @@
-
 /*
  * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
  * @APPLE_LICENSE_HEADER_END@
  */
@@ -82,8 +84,8 @@
 #define ALLOC_STRIDE  (1024 * 1024 * 1024)
 int physical_transfer_cluster_count = 0;
 
-#define VM_SUPER_CLUSTER       0x20000
-#define VM_SUPER_PAGES          32
+#define VM_SUPER_CLUSTER       0x40000
+#define VM_SUPER_PAGES          64
 
 /*
  * 0 means no shift to pages, so == 1 page/cluster. 1 would mean
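The constants above double the super-cluster from 128 KB / 32 pages to 256 KB / 64 pages. A minimal consistency sketch, assuming the 4 KB vm_page_size these values imply (the check itself is not in the source):

#define ASSUMED_PAGE_SIZE 4096  /* assumption: 4 KB pages */

/* Compile-time check: 0x40000 bytes == 64 pages * 4 KB per page. */
typedef char vm_super_cluster_check[
        (VM_SUPER_CLUSTER == VM_SUPER_PAGES * ASSUMED_PAGE_SIZE) ? 1 : -1];
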
@@ -191,9 +193,7 @@ get_read_buffer()
                          return  dpt_array[i];
                        }
                }
-               assert_wait(&dpt_array, THREAD_UNINT);
-               DPT_UNLOCK(dpt_lock);
-               thread_block((void(*)(void))0);
+               DPT_SLEEP(dpt_lock, &dpt_array, THREAD_UNINT);
        }
 }
 
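The three-step wait in get_read_buffer() collapses into DPT_SLEEP. A minimal sketch of what such a macro is assumed to expand to, reconstructed from the lines it replaces (the real definition lives in the default pager headers; whether it re-takes the lock before returning is not visible in this hunk):

#define DPT_SLEEP(lock, event, interruptible)                    \
        do {                                                     \
                assert_wait((event_t)(event), (interruptible));  \
                DPT_UNLOCK(lock);                                \
                thread_block(THREAD_CONTINUE_NULL);              \
        } while (0)
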
@@ -650,12 +650,7 @@ ps_delete(
 
 
        while(backing_store_release_trigger_disable != 0) {
-               assert_wait((event_t) 
-                       &backing_store_release_trigger_disable, 
-                       THREAD_UNINT);
-               VSL_UNLOCK();
-               thread_block((void (*)(void)) 0);
-               VSL_LOCK();
+               VSL_SLEEP(&backing_store_release_trigger_disable, THREAD_UNINT);
        }
 
        /* we will choose instead to hold a send right */
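VSL_SLEEP absorbs one more step than DPT_SLEEP: the code it replaces re-took VSL_LOCK after blocking, so the macro presumably does the same. An illustrative expansion under that assumption:

#define VSL_SLEEP(event, interruptible)                          \
        do {                                                     \
                assert_wait((event_t)(event), (interruptible));  \
                VSL_UNLOCK();                                    \
                thread_block(THREAD_CONTINUE_NULL);              \
                VSL_LOCK();                                      \
        } while (0)
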
@@ -701,22 +696,14 @@ ps_delete(
                                UPL_NO_SYNC | UPL_CLEAN_IN_PLACE
                                            | UPL_SET_INTERNAL);
                        if(error == KERN_SUCCESS) {
-#ifndef ubc_sync_working
-                               upl_commit(upl, NULL);
-                               upl_deallocate(upl);
-                               error = ps_vstruct_transfer_from_segment(
-                                               vs, ps, transfer_object);
-#else
                                error = ps_vstruct_transfer_from_segment(
                                                        vs, ps, upl);
                                upl_commit(upl, NULL);
                                upl_deallocate(upl);
-#endif
-                               vm_object_deallocate(transfer_object);
                        } else {
-                               vm_object_deallocate(transfer_object);
                                error = KERN_FAILURE;
                        }
+                       vm_object_deallocate(transfer_object);
                }
                if(error) {
                        VS_LOCK(vs);
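Besides dropping the ubc_sync_working fallback, this hunk hoists the vm_object_deallocate() that both branches used to perform into a single call on the common exit path. The net shape of the new code, shown straight-line for readability:

if (error == KERN_SUCCESS) {
        error = ps_vstruct_transfer_from_segment(vs, ps, upl);
        upl_commit(upl, NULL);
        upl_deallocate(upl);
} else {
        error = KERN_FAILURE;
}
vm_object_deallocate(transfer_object);  /* released exactly once */
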
@@ -734,12 +721,8 @@ ps_delete(
                VSL_LOCK(); 
 
                while(backing_store_release_trigger_disable != 0) {
-                       assert_wait((event_t) 
-                               &backing_store_release_trigger_disable, 
-                               THREAD_UNINT);
-                       VSL_UNLOCK();
-                       thread_block((void (*)(void)) 0);
-                       VSL_LOCK();
+                       VSL_SLEEP(&backing_store_release_trigger_disable,
+                                 THREAD_UNINT);
                }
 
                next_vs = (vstruct_t) queue_next(&(vs->vs_links));
@@ -1594,7 +1577,7 @@ ps_deallocate_cluster(
                            &backing_store_release_trigger_disable, 
                            THREAD_UNINT);
                        VSL_UNLOCK();
-                       thread_block((void (*)(void)) 0);
+                       thread_block(THREAD_CONTINUE_NULL);
                } else {
                        VSL_UNLOCK();
                }
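THREAD_CONTINUE_NULL replaces the bare function-pointer cast with the kernel's typed null continuation, conventionally defined along these lines (a sketch; the exact continuation signature varies across xnu versions):

typedef void (*thread_continue_t)(void);
#define THREAD_CONTINUE_NULL    ((thread_continue_t) 0)
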
@@ -2288,7 +2271,7 @@ ps_read_device(
                                 (mach_msg_type_number_t *) &bytes_read);
                if(kr == MIG_NO_REPLY) { 
                        assert_wait(&vsa->vsa_lock, THREAD_UNINT);
-                       thread_block((void(*)(void))0);
+                       thread_block(THREAD_CONTINUE_NULL);
 
                        dev_buffer = vsa->vsa_addr;
                        bytes_read = (unsigned int)vsa->vsa_size;
@@ -2561,18 +2544,24 @@ pvs_cluster_read(
         */
 
 #if    USE_PRECIOUS
-       request_flags = UPL_NO_SYNC |  UPL_CLEAN_IN_PLACE | UPL_PRECIOUS;
+       request_flags = UPL_NO_SYNC |  UPL_CLEAN_IN_PLACE | UPL_PRECIOUS | UPL_RET_ONLY_ABSENT;
 #else
-       request_flags = UPL_NO_SYNC |  UPL_CLEAN_IN_PLACE ;
+       request_flags = UPL_NO_SYNC |  UPL_CLEAN_IN_PLACE | UPL_RET_ONLY_ABSENT;
 #endif
        while (cnt && (error == KERN_SUCCESS)) {
                int     ps_info_valid;
                int     page_list_count;
 
-               if (cnt > VM_SUPER_CLUSTER)
+               if((vs_offset & cl_mask) && 
+                       (cnt > (VM_SUPER_CLUSTER - 
+                               (vs_offset & cl_mask)))) {
+                       size = VM_SUPER_CLUSTER;
+                       size -= vs_offset & cl_mask;
+               } else if (cnt > VM_SUPER_CLUSTER) {
                        size = VM_SUPER_CLUSTER;
-               else
+               } else {
                        size = cnt;
+               }
                cnt -= size;
 
                ps_info_valid = 0;
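Two changes land in this hunk: UPL_RET_ONLY_ABSENT keeps already-resident pages out of the returned UPL, and an unaligned request is now trimmed so that chunks after the first start on a cluster boundary. The size selection, pulled into a hypothetical helper for readability (cl_mask is assumed to be the cluster-size-minus-one mask pvs_cluster_read already uses):

static vm_size_t
next_chunk_size(vm_offset_t vs_offset, vm_size_t cnt, vm_offset_t cl_mask)
{
        if ((vs_offset & cl_mask) &&
            (cnt > (VM_SUPER_CLUSTER - (vs_offset & cl_mask))))
                /* unaligned start: stop this chunk at the boundary */
                return VM_SUPER_CLUSTER - (vs_offset & cl_mask);
        else if (cnt > VM_SUPER_CLUSTER)
                return VM_SUPER_CLUSTER;        /* full-size chunk */
        else
                return cnt;                     /* tail of the request */
}
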
@@ -2677,9 +2666,11 @@ pvs_cluster_read(
                         */
                        for (xfer_size = 0; xfer_size < size; ) {
 
-                               while (cl_index < pages_in_cl && xfer_size < size) {
+                               while (cl_index < pages_in_cl 
+                                               && xfer_size < size) {
                                        /*
-                                        * accumulate allocated pages within a physical segment
+                                        * accumulate allocated pages within
+                                        * a physical segment
                                         */
                                        if (CLMAP_ISSET(clmap, cl_index)) {
                                                xfer_size  += vm_page_size;
@@ -2691,35 +2682,43 @@ pvs_cluster_read(
                                        } else
                                                break;
                                }
-                               if (cl_index < pages_in_cl || xfer_size >= size) {
+                               if (cl_index < pages_in_cl 
+                                               || xfer_size >= size) {
                                        /*
-                                        * we've hit an unallocated page or the
-                                        * end of this request... go fire the I/O
+                                        * we've hit an unallocated page or
+                                        * the end of this request... go fire
+                                        * the I/O
                                         */
                                        break;
                                }
                                /*
-                                * we've hit the end of the current physical segment
-                                * and there's more to do, so try moving to the next one
+                                * we've hit the end of the current physical
+                                * segment and there's more to do, so try
+                                * moving to the next one
                                 */
                                seg_index++;
                                  
-                               ps_offset[seg_index] = ps_clmap(vs, cur_offset & ~cl_mask, &clmap, CL_FIND, 0, 0);
-                               psp[seg_index]       = CLMAP_PS(clmap);
+                               ps_offset[seg_index] = 
+                                       ps_clmap(vs,
+                                               cur_offset & ~cl_mask,
+                                               &clmap, CL_FIND, 0, 0);
+                               psp[seg_index] = CLMAP_PS(clmap);
                                ps_info_valid = 1;
 
                                if ((ps_offset[seg_index - 1] != (ps_offset[seg_index] - cl_size)) || (psp[seg_index - 1] != psp[seg_index])) {
                                        /*
-                                        * if the physical segment we're about to step into
-                                        * is not contiguous to the one we're currently
-                                        * in, or it's in a different paging file, or
+                                        * if the physical segment we're about
+                                        * to step into is not contiguous to
+                                        * the one we're currently in, or it's
+                                        * in a different paging file, or
                                         * it hasn't been allocated....
                                         * we stop here and generate the I/O
                                         */
                                        break;
                                }
                                /*
-                                * start with first page of the next physical segment
+                                * start with first page of the next physical
+                                * segment
                                 */
                                cl_index = 0;
                        }
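The accumulation loop above fires the I/O as soon as the next physical segment is not directly adjacent in the same paging file. The adjacency test, restated in isolation with the source's own expressions:

/* one I/O can span segments seg_index-1 and seg_index only if they
 * sit back-to-back within the same paging segment */
boolean_t contiguous =
        (ps_offset[seg_index - 1] == (ps_offset[seg_index] - cl_size)) &&
        (psp[seg_index - 1] == psp[seg_index]);
if (!contiguous)
        break;          /* stop here and generate the I/O */
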
@@ -2730,68 +2729,78 @@ pvs_cluster_read(
                                 */
                                page_list_count = 0;
                                memory_object_super_upl_request(vs->vs_control,
-                                               (memory_object_offset_t)vs_offset,
-                                               xfer_size, xfer_size, 
-                                               &upl, NULL, &page_list_count,
-                                               request_flags | UPL_SET_INTERNAL);
+                                       (memory_object_offset_t)vs_offset,
+                                       xfer_size, xfer_size, 
+                                       &upl, NULL, &page_list_count,
+                                       request_flags | UPL_SET_INTERNAL);
 
-                               error = ps_read_file(psp[beg_pseg], upl, (vm_offset_t) 0, 
-                                               ps_offset[beg_pseg] + (beg_indx * vm_page_size), xfer_size, &residual, 0);
+                               error = ps_read_file(psp[beg_pseg],
+                                       upl, (vm_offset_t) 0, 
+                                       ps_offset[beg_pseg] +
+                                               (beg_indx * vm_page_size),
+                                       xfer_size, &residual, 0);
                        } else
                                continue;
 
                        failed_size = 0;
 
                        /*
-                        * Adjust counts and send response to VM.  Optimize for the
-                        * common case, i.e. no error and/or partial data.
-                        * If there was an error, then we need to error the entire
-                        * range, even if some data was successfully read.
-                        * If there was a partial read we may supply some
+                        * Adjust counts and send response to VM.  Optimize
+                        * for the common case, i.e. no error and/or partial
+                        * data.  If there was an error, then we need to error
+                        * the entire range, even if some data was successfully
+                        * read.  If there was a partial read we may supply some
                         * data and may error some as well.  In all cases the
                         * VM must receive some notification for every page in the
                         * range.
                         */
                        if ((error == KERN_SUCCESS) && (residual == 0)) {
                                /*
-                                * Got everything we asked for, supply the data to
-                                * the VM.  Note that as a side effect of supplying
-                                * the data, the buffer holding the supplied data is
-                                * deallocated from the pager's address space.
+                                * Got everything we asked for, supply the data
+                                * to the VM.  Note that as a side effect of
+                                * supplying the data, the buffer holding the
+                                * supplied data is deallocated from the pager's
+                                * address space.
                                 */
-                               pvs_object_data_provided(vs, upl, vs_offset, xfer_size);
+                               pvs_object_data_provided(
+                                       vs, upl, vs_offset, xfer_size);
                        } else {
                                failed_size = xfer_size;
 
                                if (error == KERN_SUCCESS) {
                                        if (residual == xfer_size) {
-                                               /*
-                                                * If a read operation returns no error
-                                                * and no data moved, we turn it into
-                                                * an error, assuming we're reading at
-                                                * or beyong EOF.
-                                                * Fall through and error the entire
-                                                * range.
-                                                */
+                                       /*
+                                        * If a read operation returns no error
+                                        * and no data moved, we turn it into
+                                        * an error, assuming we're reading at
+                                        * or beyond EOF.
+                                        * Fall through and error the entire
+                                        * range.
+                                        */
                                                error = KERN_FAILURE;
                                        } else {
-                                               /*
-                                                * Otherwise, we have partial read. If
-                                                * the part read is a integral number
-                                                * of pages supply it. Otherwise round
-                                                * it up to a page boundary, zero fill
-                                                * the unread part, and supply it.
-                                                * Fall through and error the remainder
-                                                * of the range, if any.
-                                                */
+                                       /*
+                                        * Otherwise, we have a partial read. If
+                                        * the part read is an integral number
+                                        * of pages, supply it. Otherwise round
+                                        * it up to a page boundary, zero fill
+                                        * the unread part, and supply it.
+                                        * Fall through and error the remainder
+                                        * of the range, if any.
+                                        */
                                                int fill, lsize;
 
-                                               fill = residual & ~vm_page_size;
-                                               lsize = (xfer_size - residual) + fill;
-                                               pvs_object_data_provided(vs, upl, vs_offset, lsize);
+                                               fill = residual 
+                                                       & ~vm_page_size;
+                                               lsize = (xfer_size - residual) 
+                                                                        + fill;
+                                               pvs_object_data_provided(
+                                                       vs, upl,
+                                                       vs_offset, lsize);
 
                                                if (lsize < xfer_size) {
-                                                       failed_size = xfer_size - lsize;
+                                                       failed_size = 
+                                                           xfer_size - lsize;
                                                        error = KERN_FAILURE;
                                                }
                                        }
@@ -2799,12 +2808,13 @@ pvs_cluster_read(
                        }
                        /*
                         * If there was an error in any part of the range, tell
-                        * the VM. Note that error is explicitly checked again since
-                        * it can be modified above.
+                        * the VM. Note that error is explicitly checked again
+                        * since it can be modified above.
                         */
                        if (error != KERN_SUCCESS) {
                                BS_STAT(psp[beg_pseg]->ps_bs,
-                                       psp[beg_pseg]->ps_bs->bs_pages_in_fail += atop(failed_size));
+                                       psp[beg_pseg]->ps_bs->bs_pages_in_fail 
+                                               += atop(failed_size));
                        }
                        size       -= xfer_size;
                        vs_offset  += xfer_size;
@@ -2888,12 +2898,14 @@ vs_cluster_write(
 
                pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
 
-               for (seg_index = 0, transfer_size = upl->size; transfer_size > 0; ) {
+               for (seg_index = 0, transfer_size = upl->size; 
+                                               transfer_size > 0; ) {
 
-                       ps_offset[seg_index] = ps_clmap(vs, upl->offset + (seg_index * cl_size),
-                                                     &clmap, CL_ALLOC, 
-                                                     transfer_size < cl_size ? 
-                                                     transfer_size : cl_size, 0);
+                       ps_offset[seg_index] = 
+                               ps_clmap(vs, upl->offset + (seg_index * cl_size),
+                                      &clmap, CL_ALLOC, 
+                                      transfer_size < cl_size ? 
+                                      transfer_size : cl_size, 0);
 
                        if (ps_offset[seg_index] == (vm_offset_t) -1) {
                                upl_abort(upl, 0);
@@ -2910,21 +2922,25 @@ vs_cluster_write(
                        } else
                                transfer_size = 0;
                }
-               for (page_index = 0, num_of_pages = upl->size / vm_page_size; page_index < num_of_pages; ) {
+               for (page_index = 0,
+                               num_of_pages = upl->size / vm_page_size;
+                               page_index < num_of_pages; ) {
                        /*
                         * skip over non-dirty pages
                         */
                        for ( ; page_index < num_of_pages; page_index++) {
-                               if (UPL_DIRTY_PAGE(pl, page_index) || UPL_PRECIOUS_PAGE(pl, page_index))
+                               if (UPL_DIRTY_PAGE(pl, page_index)
+                                       || UPL_PRECIOUS_PAGE(pl, page_index))
                                        /*
                                         * this is a page we need to write
-                                        * go see if we can buddy it up with others
-                                        * that are contiguous to it
+                                        * go see if we can buddy it up with
+                                        * others that are contiguous to it
                                         */
                                        break;
                                /*
-                                * if the page is not-dirty, but present we need to commit it...
-                                * this is an unusual case since we only asked for dirty pages
+                                * if the page is not dirty, but present, we
+                                * need to commit it...  This is an unusual
+                                * case since we only asked for dirty pages
                                 */
                                if (UPL_PAGE_PRESENT(pl, page_index)) {
                                        boolean_t empty = FALSE;
@@ -2946,14 +2962,16 @@ vs_cluster_write(
                                break;
 
                        /*
-                        * gather up contiguous dirty pages... we have at least 1
-                        * otherwise we would have bailed above
+                        * gather up contiguous dirty pages... we have at
+                        * least 1 otherwise we would have bailed above
                         * make sure that each physical segment that we step
                         * into is contiguous to the one we're currently in
                         * if it's not, we have to stop and write what we have
                         */
-                       for (first_dirty = page_index; page_index < num_of_pages; ) {
-                               if ( !UPL_DIRTY_PAGE(pl, page_index) && !UPL_PRECIOUS_PAGE(pl, page_index))
+                       for (first_dirty = page_index;
+                                       page_index < num_of_pages; ) {
+                               if ( !UPL_DIRTY_PAGE(pl, page_index)
+                                       && !UPL_PRECIOUS_PAGE(pl, page_index))
                                        break;
                                page_index++;
                                /*
@@ -2965,17 +2983,21 @@ vs_cluster_write(
                                        int cur_seg;
                                        int nxt_seg;
 
-                                       cur_seg = (page_index - 1) / pages_in_cl;
+                                       cur_seg =
+                                               (page_index - 1) / pages_in_cl;
                                        nxt_seg = page_index / pages_in_cl;
 
                                        if (cur_seg != nxt_seg) {
                                                if ((ps_offset[cur_seg] != (ps_offset[nxt_seg] - cl_size)) || (psp[cur_seg] != psp[nxt_seg]))
-                                                       /*
-                                                        * if the segment we're about to step into
-                                                        * is not contiguous to the one we're currently
-                                                        * in, or it's in a different paging file....
-                                                        * we stop here and generate the I/O
-                                                        */
+                                               /*
+                                                * if the segment we're about
+                                                * to step into is not
+                                                * contiguous to the one we're
+                                                * currently in, or it's in a
+                                                * different paging file....
+                                                * we stop here and generate
+                                                * the I/O
+                                                */
                                                        break;
                                        }
                                }
@@ -2989,23 +3011,30 @@ vs_cluster_write(
                                seg_offset = upl_offset - (seg_index * cl_size);
                                transfer_size = num_dirty * vm_page_size;
 
-                               error = ps_write_file(psp[seg_index], upl, upl_offset,
-                                                     ps_offset[seg_index] + seg_offset, transfer_size, flags);
 
-                               if (error == 0) {
-                                       while (transfer_size) {
-                                               int seg_size;
+                               while (transfer_size) {
+                                       int seg_size;
 
-                                               if ((seg_size = cl_size - (upl_offset % cl_size)) > transfer_size)
-                                                       seg_size = transfer_size;
+                                       if ((seg_size = cl_size - 
+                                               (upl_offset % cl_size)) 
+                                                       > transfer_size)
+                                               seg_size = transfer_size;
 
-                                               ps_vs_write_complete(vs, upl->offset + upl_offset, seg_size, error);
+                                       ps_vs_write_complete(vs, 
+                                               upl->offset + upl_offset, 
+                                               seg_size, error);
 
-                                               transfer_size -= seg_size;
-                                               upl_offset += seg_size;
-                                       }
-                                       must_abort = 0;
+                                       transfer_size -= seg_size;
+                                       upl_offset += seg_size;
                                }
+                               upl_offset = first_dirty * vm_page_size;
+                               transfer_size = num_dirty * vm_page_size;
+                               error = ps_write_file(psp[seg_index], 
+                                               upl, upl_offset,
+                                               ps_offset[seg_index] 
+                                                               + seg_offset, 
+                                               transfer_size, flags);
+                               must_abort = 0;
                        }
                        if (must_abort) {
                                boolean_t empty = FALSE;
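The substantive change in this hunk is ordering: ps_vs_write_complete() now runs over each cluster-sized slice before ps_write_file() is issued, and unconditionally rather than only after a successful write. Condensed from the hunk (the initial upl_offset is assumed equal to first_dirty * vm_page_size, which the post-loop rewind suggests; error still holds its prior value when passed to ps_vs_write_complete, exactly as in the source):

/* 1. record completion per cluster-sized slice */
upl_offset    = first_dirty * vm_page_size;
transfer_size = num_dirty * vm_page_size;
while (transfer_size) {
        int seg_size = cl_size - (upl_offset % cl_size);

        if (seg_size > transfer_size)
                seg_size = transfer_size;
        ps_vs_write_complete(vs, upl->offset + upl_offset,
                             seg_size, error);
        transfer_size -= seg_size;
        upl_offset    += seg_size;
}
/* 2. rewind and issue the actual file I/O */
upl_offset    = first_dirty * vm_page_size;
transfer_size = num_dirty * vm_page_size;
error = ps_write_file(psp[seg_index], upl, upl_offset,
                      ps_offset[seg_index] + seg_offset,
                      transfer_size, flags);
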
@@ -3047,14 +3076,14 @@ vs_cluster_write(
                        /* Assume that the caller has given us contiguous */
                        /* pages */
                        if(cnt) {
+                               ps_vs_write_complete(vs, mobj_target_addr, 
+                                                               cnt, error);
                                error = ps_write_file(ps, internal_upl,
                                                0, actual_offset,
                                                cnt, flags);
                                if (error)
                                        break;
-                               ps_vs_write_complete(vs, mobj_target_addr, 
-                                                               cnt, error);
-                          }
+                       }
                        if (error)
                                break;
                        actual_offset += cnt;
@@ -3191,11 +3220,7 @@ kern_return_t
 ps_vstruct_transfer_from_segment(
        vstruct_t        vs,
        paging_segment_t segment,
-#ifndef ubc_sync_working
-       vm_object_t     transfer_object)
-#else
        upl_t            upl)
-#endif
 {
        struct vs_map   *vsmap;
        struct vs_map   old_vsmap;
@@ -3246,11 +3271,7 @@ vs_changed:
                                        (vm_page_size * (j << vs->vs_clshift))
                                        + clmap_off, 
                                        vm_page_size << vs->vs_clshift,
-#ifndef ubc_sync_working
-                                       transfer_object)
-#else
                                        upl)
-#endif
                                                != KERN_SUCCESS) {
                                   VS_LOCK(vs);
                                   vs->vs_xfer_pending = FALSE;
@@ -3292,11 +3313,7 @@ vs_changed:
                        if(vs_cluster_transfer(vs, 
                                vm_page_size * (j << vs->vs_clshift), 
                                vm_page_size << vs->vs_clshift,
-#ifndef ubc_sync_working
-                               transfer_object) != KERN_SUCCESS) {
-#else
                                upl) != KERN_SUCCESS) {
-#endif
                           VS_LOCK(vs);
                           vs->vs_xfer_pending = FALSE;
                           VS_UNLOCK(vs);
@@ -3356,11 +3373,7 @@ vs_cluster_transfer(
        vstruct_t       vs,
        vm_offset_t     offset,
        vm_size_t       cnt,
-#ifndef ubc_sync_working
-       vm_object_t     transfer_object)
-#else
        upl_t           upl)
-#endif
 {
        vm_offset_t             actual_offset;
        paging_segment_t        ps;
@@ -3376,10 +3389,6 @@ vs_cluster_transfer(
        struct  vs_map          original_read_vsmap;
        struct  vs_map          write_vsmap;
        upl_t                   sync_upl;
-#ifndef ubc_sync_working
-       upl_t                   upl;
-#endif
-
        vm_offset_t     ioaddr;
 
        /* vs_cluster_transfer reads in the pages of a cluster and
@@ -3483,30 +3492,15 @@ vs_cluster_transfer(
 
                if(ps->ps_segtype == PS_PARTITION) {
 /*
-                       NEED TO BE WITH SYNC & NO COMMIT
+                       NEED TO ISSUE WITH SYNC & NO COMMIT
                        error = ps_read_device(ps, actual_offset, &buffer,
                                       size, &residual, flags);
 */
                } else {
-#ifndef ubc_sync_working
-                       int page_list_count = 0;
-
-                       error = vm_object_upl_request(transfer_object, 
-(vm_object_offset_t) (actual_offset & ((vm_page_size << vs->vs_clshift) - 1)),
-                                       size, &upl, NULL, &page_list_count,
-                                       UPL_NO_SYNC | UPL_CLEAN_IN_PLACE 
-                                                   | UPL_SET_INTERNAL);
-                       if (error == KERN_SUCCESS) {
-                               error = ps_read_file(ps, upl, (vm_offset_t) 0, actual_offset, 
-                                                       size, &residual, 0);
-                       }
-                                       
-#else
-                       /* NEED TO BE WITH SYNC & NO COMMIT & NO RDAHEAD*/
+                       /* NEED TO ISSUE WITH SYNC & NO COMMIT */
                        error = ps_read_file(ps, upl, (vm_offset_t) 0, actual_offset, 
                                        size, &residual, 
-                                       (UPL_IOSYNC | UPL_NOCOMMIT | UPL_NORDAHEAD));
-#endif
+                                       (UPL_IOSYNC | UPL_NOCOMMIT));
                }
 
                read_vsmap = *vsmap_ptr;
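With the #ifndef arm gone, the transfer path always reads through the caller-supplied UPL synchronously and without committing pages (UPL_NORDAHEAD is dropped along the way). The surviving call, annotated with the assumed flag semantics (general UPL conventions, not quoted from this tree):

error = ps_read_file(ps, upl, (vm_offset_t) 0, actual_offset,
                     size, &residual,
                     (UPL_IOSYNC     /* perform the I/O synchronously */
                      | UPL_NOCOMMIT /* leave pages uncommitted; the
                                        caller commits or aborts */));
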
@@ -3535,20 +3529,8 @@ vs_cluster_transfer(
                        /* the vm_map_copy_page_discard call              */
                        *vsmap_ptr = write_vsmap;
 
-#ifndef ubc_sync_working
-                       error = vm_object_upl_request(transfer_object, 
-                                       (vm_object_offset_t)
-                                       (actual_offset & ((vm_page_size << vs->vs_clshift) - 1)),
-                                        size, &upl, NULL, &page_list_count,
-                                        UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
-                       if(vs_cluster_write(vs, upl, offset, 
-                                       size, TRUE, 0) != KERN_SUCCESS) {
-                               upl_commit(upl, NULL);
-                               upl_deallocate(upl);
-#else
                        if(vs_cluster_write(vs, upl, offset, 
                                        size, TRUE, UPL_IOSYNC | UPL_NOCOMMIT ) != KERN_SUCCESS) {
-#endif
                                error = KERN_FAILURE;
                                if(!(VSM_ISCLR(*vsmap_ptr))) {
                                        /* unmap the new backing store object */