diff --git a/bsd/vfs/vfs_cluster.c b/bsd/vfs/vfs_cluster.c
index 07f0dbbd57c4bd708e8c25f7af49055d0f7ab200..ec2eaf7f499bbbb8bfa39d58c86f861c4ef77711 100644
--- a/bsd/vfs/vfs_cluster.c
+++ b/bsd/vfs/vfs_cluster.c
@@ -1,21 +1,24 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
  * @APPLE_LICENSE_HEADER_END@
  */
 #define CL_READ      0x01
 #define CL_ASYNC     0x02
 #define CL_COMMIT    0x04
-#define CL_NOMAP     0x08
 #define CL_PAGEOUT   0x10
 #define CL_AGE       0x20
 #define CL_DUMP      0x40
 #define CL_NOZERO    0x80
 #define CL_PAGEIN    0x100
 #define CL_DEV_MEMORY 0x200
+#define CL_PRESERVE   0x400
+
+
+struct clios {
+        u_int  io_completed;       /* amount of io that has currently completed */
+        u_int  io_issued;          /* amount of io that was successfully issued */
+        int    io_error;           /* error code of first error encountered */
+        int    io_wanted;          /* someone is sleeping waiting for a change in state */
+};
+
+
+static void cluster_zero(upl_t upl, vm_offset_t   upl_offset,
+               int size, struct buf *bp);
+static int cluster_read_x(struct vnode *vp, struct uio *uio,
+               off_t filesize, int devblocksize, int flags);
+static int cluster_write_x(struct vnode *vp, struct uio *uio,
+               off_t oldEOF, off_t newEOF, off_t headOff,
+               off_t tailOff, int devblocksize, int flags);
+static int cluster_nocopy_read(struct vnode *vp, struct uio *uio,
+               off_t filesize, int devblocksize, int flags);
+static int cluster_nocopy_write(struct vnode *vp, struct uio *uio,
+               off_t newEOF, int devblocksize, int flags);
+static int cluster_phys_read(struct vnode *vp, struct uio *uio,
+               off_t filesize, int devblocksize, int flags);
+static int cluster_phys_write(struct vnode *vp, struct uio *uio,
+               off_t newEOF, int devblocksize, int flags);
+static int cluster_align_phys_io(struct vnode *vp, struct uio *uio,
+                addr64_t usr_paddr, int xsize, int devblocksize, int flags);
+static int cluster_push_x(struct vnode *vp, off_t EOF, daddr_t first, daddr_t last, int can_delay);
+static int cluster_try_push(struct vnode *vp, off_t newEOF, int can_delay, int push_all);
+
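The new struct clios above is the accounting block for streamed async I/O: cluster_io() adds to io_issued for each buffer it sends down, cluster_iodone() adds to io_completed and wakes any sleeper, and io_error latches the first failure. A minimal sketch of the intended handshake (illustrative only; the drain loop is modeled on cluster_nocopy_write further down, and is not code from this diff):

	struct clios iostate;
	int error = 0;

	iostate.io_completed = 0;
	iostate.io_issued    = 0;
	iostate.io_error     = 0;
	iostate.io_wanted    = 0;

	/* ... issue one or more CL_ASYNC cluster_io() calls, passing &iostate ... */

	while (iostate.io_issued != iostate.io_completed) {
		/* flag that we're sleeping so cluster_iodone() issues a wakeup */
		iostate.io_wanted = 1;
		tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "clios_drain", 0);
	}
	if (iostate.io_error)
		error = iostate.io_error;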
 
 /*
  * throttle the number of async writes that
@@ -97,12 +130,14 @@ cluster_iodone(bp)
        int         total_size;
        int         total_resid;
        int         upl_offset;
+       int         zero_offset;
        upl_t       upl;
        struct buf *cbp;
        struct buf *cbp_head;
        struct buf *cbp_next;
        struct buf *real_bp;
        struct vnode *vp;
+       struct clios *iostate;
        int         commit_size;
        int         pg_offset;
 
@@ -110,7 +145,7 @@ cluster_iodone(bp)
        cbp_head = (struct buf *)(bp->b_trans_head);
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_START,
-                    cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
+                    (int)cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
 
        for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
                /*
@@ -120,7 +155,7 @@ cluster_iodone(bp)
                if ( !(cbp->b_flags & B_DONE)) {
 
                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
-                                    cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0);
+                                    (int)cbp_head, (int)cbp, cbp->b_bcount, cbp->b_flags, 0);
 
                        return 0;
                }
@@ -135,11 +170,10 @@ cluster_iodone(bp)
        b_flags    = cbp->b_flags;
        real_bp    = cbp->b_real_bp;
        vp         = cbp->b_vp;
+       zero_offset= cbp->b_validend;
+       iostate    = (struct clios *)cbp->b_iostate;
 
        while (cbp) {
-               if (cbp->b_vectorcount > 1)
-                       _FREE(cbp->b_vectorlist, M_SEGMENT);
-
                if ((cbp->b_flags & B_ERROR) && error == 0)
                        error = cbp->b_error;
 
@@ -152,10 +186,32 @@ cluster_iodone(bp)
 
                cbp = cbp_next;
        }
+       if (zero_offset)
+               cluster_zero(upl, zero_offset, PAGE_SIZE - (zero_offset & PAGE_MASK), real_bp);
+
        if ((vp->v_flag & VTHROTTLED) && (vp->v_numoutput <= (ASYNC_THROTTLE / 3))) {
                vp->v_flag &= ~VTHROTTLED;
                wakeup((caddr_t)&vp->v_numoutput);
        }
+       if (iostate) {
+               /*
+                * someone has issued multiple I/Os asynchronously
+                * and is waiting for them to complete (streaming)
+                */
+               if (error && iostate->io_error == 0)
+                       iostate->io_error = error;
+
+               iostate->io_completed += total_size;
+
+               if (iostate->io_wanted) {
+                       /*
+                        * someone is waiting for the state of
+                        * this io stream to change
+                        */
+                       iostate->io_wanted = 0;
+                       wakeup((caddr_t)&iostate->io_wanted);
+               }
+       }
        if ((b_flags & B_NEED_IODONE) && real_bp) {
                if (error) {
                        real_bp->b_flags |= B_ERROR;
@@ -169,13 +225,15 @@ cluster_iodone(bp)
                error = EIO;
 
        if (b_flags & B_COMMIT_UPL) {
-               pg_offset   = upl_offset & PAGE_MASK;
+               pg_offset   = upl_offset & PAGE_MASK;
                commit_size = (((pg_offset + total_size) + (PAGE_SIZE - 1)) / PAGE_SIZE) * PAGE_SIZE;
 
-               if (error || (b_flags & B_NOCACHE)) {
+               if (error || (b_flags & B_NOCACHE) || ((b_flags & B_PHYS) && !(b_flags & B_READ))) {
                        int upl_abort_code;
 
-                       if (b_flags & B_PAGEOUT)
+                       if (b_flags & B_PHYS)
+                               upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
+                       else if ((b_flags & B_PAGEOUT) && (error != ENXIO)) /* transient error */
                                upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
                        else if (b_flags & B_PGIN)
                                upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
@@ -186,13 +244,15 @@ cluster_iodone(bp)
                                        upl_abort_code);
                        
                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
-                                    upl, upl_offset - pg_offset, commit_size,
+                                    (int)upl, upl_offset - pg_offset, commit_size,
                                     0x80000000|upl_abort_code, 0);
 
                } else {
                        int upl_commit_flags = UPL_COMMIT_FREE_ON_EMPTY;
 
-                       if ( !(b_flags & B_PAGEOUT))
+                       if (b_flags & B_PHYS)
+                               upl_commit_flags |= UPL_COMMIT_SET_DIRTY;
+                       else if ( !(b_flags & B_PAGEOUT))
                                upl_commit_flags |= UPL_COMMIT_CLEAR_DIRTY;
                        if (b_flags & B_AGE)
                                upl_commit_flags |= UPL_COMMIT_INACTIVATE;
@@ -201,40 +261,45 @@ cluster_iodone(bp)
                                        upl_commit_flags);
 
                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
-                                    upl, upl_offset - pg_offset, commit_size,
+                                    (int)upl, upl_offset - pg_offset, commit_size,
                                     upl_commit_flags, 0);
                }
        } else 
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
-                            upl, upl_offset, 0, error, 0);
+                            (int)upl, upl_offset, 0, error, 0);
 
        return (error);
 }
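The commit_size computation in cluster_iodone rounds the transaction's byte count up to whole pages after folding in the offset of the first page; a quick illustrative check, assuming 4K pages (values are hypothetical):

	/*
	 * illustrative values only: a chain that moved 8192 bytes starting
	 * 512 bytes into its first page touches 3 pages, all of which must
	 * be committed or aborted
	 */
	int pg_offset   = 512;     /* upl_offset & PAGE_MASK */
	int total_size  = 8192;    /* bytes transferred by this chain */
	int commit_size = (((pg_offset + total_size) + (PAGE_SIZE - 1)) / PAGE_SIZE) * PAGE_SIZE;
	/* (512 + 8192 + 4095) / 4096 * 4096 == 12288 */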
 
 
 static void
-cluster_zero(upl, upl_offset, size, flags, bp)
+cluster_zero(upl, upl_offset, size, bp)
        upl_t         upl;
        vm_offset_t   upl_offset;
        int           size;
-       int           flags;
        struct buf   *bp;
 {
         vm_offset_t   io_addr = 0;
+       int           must_unmap = 0;
        kern_return_t kret;
 
-       if ( !(flags & CL_NOMAP)) {
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_NONE,
+                    upl_offset, size, (int)bp, 0, 0);
+
+       if (bp == NULL || bp->b_data == NULL) {
                kret = ubc_upl_map(upl, &io_addr);
                
                if (kret != KERN_SUCCESS)
                        panic("cluster_zero: ubc_upl_map() failed with (%d)", kret);
                if (io_addr == 0) 
                        panic("cluster_zero: ubc_upl_map() mapped 0");
+
+               must_unmap = 1;
        } else
                io_addr = (vm_offset_t)bp->b_data;
        bzero((caddr_t)(io_addr + upl_offset), size);
        
-       if ( !(flags & CL_NOMAP)) {
+       if (must_unmap) {
                kret = ubc_upl_unmap(upl);
 
                if (kret != KERN_SUCCESS)
@@ -243,28 +308,34 @@ cluster_zero(upl, upl_offset, size, flags, bp)
 }
 
 static int
-cluster_io(vp, upl, upl_offset, f_offset, size, flags, real_bp)
+cluster_io(vp, upl, upl_offset, f_offset, non_rounded_size, devblocksize, flags, real_bp, iostate)
        struct vnode *vp;
        upl_t         upl;
        vm_offset_t   upl_offset;
        off_t         f_offset;
-       int           size;
+       int           non_rounded_size;
+       int           devblocksize;
        int           flags;
        struct buf   *real_bp;
+       struct clios *iostate;
 {
        struct buf   *cbp;
-       struct iovec *iovp;
+       u_int         size;
+       u_int         io_size;
        int           io_flags;
        int           error = 0;
        int           retval = 0;
        struct buf   *cbp_head = 0;
        struct buf   *cbp_tail = 0;
        upl_page_info_t *pl;
+       int buf_count = 0;
        int pg_count;
        int pg_offset;
-       int max_iosize;
-       int max_vectors;
+       u_int max_iosize;
+       u_int max_vectors;
        int priv;
+       int zero_offset = 0;
+       u_int  first_lblkno;
 
        if (flags & CL_READ) {
                io_flags = (B_VECTORLIST | B_READ);
@@ -277,20 +348,29 @@ cluster_io(vp, upl, upl_offset, f_offset, size, flags, real_bp)
        }
        pl = ubc_upl_pageinfo(upl);
 
-       if (flags & CL_ASYNC)
-               io_flags |= (B_CALL | B_ASYNC);
        if (flags & CL_AGE)
                io_flags |= B_AGE;
        if (flags & CL_DUMP)
                io_flags |= B_NOCACHE;
        if (flags & CL_PAGEIN)
                io_flags |= B_PGIN;
+       if (flags & CL_PAGEOUT)
+               io_flags |= B_PAGEOUT;
+       if (flags & CL_COMMIT)
+               io_flags |= B_COMMIT_UPL;
+       if (flags & CL_PRESERVE)
+               io_flags |= B_PHYS;
+
+       if (devblocksize)
+               size = (non_rounded_size + (devblocksize - 1)) & ~(devblocksize - 1);
+       else
+               size = non_rounded_size;
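               /*
                * (illustrative note, not part of this change: the round-up
                * above relies on devblocksize being a power of two; e.g. with
                * devblocksize == 512, a non_rounded_size of 1000 becomes
                * (1000 + 511) & ~511 == 1024)
                */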
 
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START,
                     (int)f_offset, size, upl_offset, flags, 0);
 
-       if ((flags & CL_READ) && ((upl_offset + size) & PAGE_MASK) && (!(flags & CL_NOZERO))) {
+       if ((flags & CL_READ) && ((upl_offset + non_rounded_size) & PAGE_MASK) && (!(flags & CL_NOZERO))) {
                /*
                 * then we are going to end up
                 * with a page that we can't complete (the file size wasn't a multiple
@@ -298,16 +378,10 @@ cluster_io(vp, upl, upl_offset, f_offset, size, flags, real_bp)
                 * so we'll go ahead and zero out the portion of the page we can't
                 * read in from the file
                 */
-               cluster_zero(upl, upl_offset + size, PAGE_SIZE - ((upl_offset + size) & PAGE_MASK), flags, real_bp);
-
-               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_NONE,
-                            upl_offset + size, PAGE_SIZE - ((upl_offset + size) & PAGE_MASK),
-                            flags, real_bp, 0);
+               zero_offset = upl_offset + non_rounded_size;
        }
        while (size) {
-               size_t io_size;
-               int vsize;
-               int i;
+               int i; 
                int pl_index;
                int pg_resid;
                int num_contig;
@@ -319,14 +393,14 @@ cluster_io(vp, upl, upl_offset, f_offset, size, flags, real_bp)
                else
                        io_size = size;
 
-               if (error = VOP_CMAP(vp, f_offset, io_size, &blkno, &io_size, NULL)) {
+               if (error = VOP_CMAP(vp, f_offset, io_size, &blkno, (size_t *)&io_size, NULL)) {
                        if (error == EOPNOTSUPP)
                                panic("VOP_CMAP Unimplemented");
                        break;
                }
 
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 24)) | DBG_FUNC_NONE,
-                            (int)f_offset, (int)blkno, io_size, 0, 0);
+                            (int)f_offset, (int)blkno, io_size, zero_offset, 0);
 
                if ( (!(flags & CL_READ) && (long)blkno == -1) || io_size == 0) {
                        if (flags & CL_PAGEOUT) {
@@ -375,19 +449,56 @@ cluster_io(vp, upl, upl_offset, f_offset, size, flags, real_bp)
                        pg_count = 1;
                }
                if ((flags & CL_READ) && (long)blkno == -1) {
+                       int bytes_to_zero;
+
                        /*
                         * if we're reading and blkno == -1, then we've got a
                         * 'hole' in the file that we need to deal with by zeroing
                         * out the affected area in the upl
                         */
-                       cluster_zero(upl, upl_offset, io_size, flags, real_bp);
-
-                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_NONE,
-                                    upl_offset, io_size, flags, real_bp, 0);
+                       if (zero_offset && io_size == size) {
+                               /*
+                                * if this upl contains the EOF and it is not a multiple of PAGE_SIZE
+                                * then 'zero_offset' will be non-zero
+                                * if the 'hole' returned by VOP_CMAP extends all the way to the eof
+                                * (indicated by the io_size finishing off the I/O request for this UPL)
+                                * then we're not going to issue an I/O for the
+                                * last page in this upl... we need to zero both the hole and the tail
+                                * of the page beyond the EOF, since the delayed zero-fill won't kick in 
+                                */
+                               bytes_to_zero = (((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset;
 
-                       pg_count = (io_size - pg_offset) / PAGE_SIZE;
+                               zero_offset = 0;
+                       } else
+                               bytes_to_zero = io_size;
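                       /*
                        * (illustrative note, not part of this change: with
                        * upl_offset == 0, io_size == 6000 and 4K pages, the
                        * EOF case zeroes ((0 + 6000 + 4095) & ~PAGE_MASK) - 0
                        * == 8192 bytes, i.e. both the hole and the tail of
                        * the last page beyond the EOF)
                        */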
 
+                       cluster_zero(upl, upl_offset, bytes_to_zero, real_bp);
+                         
+                       if (cbp_head)
+                               /*
+                                * if there is a current I/O chain pending
+                                * then the first page of the group we just zero'd
+                                * will be handled by the I/O completion if the zero
+                                * fill started in the middle of the page
+                                */
+                               pg_count = (io_size - pg_offset) / PAGE_SIZE;
+                       else {
+                               /*
+                                * no pending I/O to pick up that first page
+                                * so, we have to make sure it gets committed
+                                * here.
+                                * set the pg_offset to 0 so that the upl_commit_range
+                                * starts with this page
+                                */
+                               pg_count = (io_size + pg_offset) / PAGE_SIZE;
+                               pg_offset = 0;
+                       }
                        if (io_size == size && ((upl_offset + io_size) & PAGE_MASK))
+                               /*
+                                * if we're done with the request for this UPL
+                                * then we have to make sure to commit the last page
+                                * even if we only partially zero-filled it
+                                */
                                pg_count++;
 
                        if (pg_count) {
@@ -395,9 +506,10 @@ cluster_io(vp, upl, upl_offset, f_offset, size, flags, real_bp)
                                        pg_resid = PAGE_SIZE - pg_offset;
                                else
                                        pg_resid = 0;
+
                                if (flags & CL_COMMIT)
                                        ubc_upl_commit_range(upl,
-                                                       upl_offset + pg_resid,
+                                                       (upl_offset + pg_resid) & ~PAGE_MASK,
                                                        pg_count * PAGE_SIZE,
                                                        UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
                        }
@@ -405,38 +517,22 @@ cluster_io(vp, upl, upl_offset, f_offset, size, flags, real_bp)
                        f_offset   += io_size;
                        size       -= io_size;
 
-                       if (cbp_head && pg_count)
+                       if (cbp_head && pg_count) 
                                goto start_io;
                        continue;
+
                } else if (real_bp && (real_bp->b_blkno == real_bp->b_lblkno)) {
                        real_bp->b_blkno = blkno;
                }
 
-               if (pg_count > 1) {
-                       if (pg_count > max_vectors) {
-                               io_size -= (pg_count - max_vectors) * PAGE_SIZE;
+               if (pg_count > max_vectors) {
+                       io_size -= (pg_count - max_vectors) * PAGE_SIZE;
 
-                               if (io_size < 0) {
-                                       io_size = PAGE_SIZE - pg_offset;
-                                       pg_count = 1;
-                               } else
-                                       pg_count = max_vectors;
-                       }
-                       /* 
-                        * we need to allocate space for the vector list
-                        */
-                       if (pg_count > 1) {
-                               iovp = (struct iovec *)_MALLOC(sizeof(struct iovec) * pg_count,
-                                                              M_SEGMENT, M_NOWAIT);
-                       
-                               if (iovp == (struct iovec *) 0) {
-                                       /*
-                                        * if the allocation fails, then throttle down to a single page
-                                        */
-                                       io_size = PAGE_SIZE - pg_offset;
-                                       pg_count = 1;
-                               }
-                       }
+                       if (io_size < 0) {
+                               io_size = PAGE_SIZE - pg_offset;
+                               pg_count = 1;
+                       } else
+                               pg_count = max_vectors;
                }
 
                /* Throttle the speculative IO */
@@ -447,53 +543,9 @@ cluster_io(vp, upl, upl_offset, f_offset, size, flags, real_bp)
 
                cbp = alloc_io_buf(vp, priv);
 
-               if (pg_count == 1)
-                       /*
-                        * we use the io vector that's reserved in the buffer header
-                        * this insures we can always issue an I/O even in a low memory
-                        * condition that prevents the _MALLOC from succeeding... this
-                        * is necessary to prevent deadlocks with the pager
-                        */
-                       iovp = (struct iovec *)(&cbp->b_vects[0]);
-
-               cbp->b_vectorlist  = (void *)iovp;
-               cbp->b_vectorcount = pg_count;
-
-               if (flags & CL_DEV_MEMORY) {
-
-                       iovp->iov_len  = io_size;
-                       iovp->iov_base = (caddr_t)upl_phys_page(pl, 0);
-
-                       if (iovp->iov_base == (caddr_t) 0) {
-                               free_io_buf(cbp);
-                               error = EINVAL;
-                       } else
-                               iovp->iov_base += upl_offset;
-               } else {
-
-                 for (i = 0, vsize = io_size; i < pg_count; i++, iovp++) {
-                       int     psize;
 
-                       psize = PAGE_SIZE - pg_offset;
-
-                       if (psize > vsize)
-                               psize = vsize;
-
-                       iovp->iov_len  = psize;
-                       iovp->iov_base = (caddr_t)upl_phys_page(pl, pl_index + i);
-
-                       if (iovp->iov_base == (caddr_t) 0) {
-                               if (pg_count > 1)
-                                       _FREE(cbp->b_vectorlist, M_SEGMENT);
-                               free_io_buf(cbp);
-
-                               error = EINVAL;
-                               break;
-                       }
-                       iovp->iov_base += pg_offset;
-                       pg_offset = 0;
-
-                       if (flags & CL_PAGEOUT) {
+               if (flags & CL_PAGEOUT) {
+                       for (i = 0; i < pg_count; i++) {
                                int         s;
                                struct buf *bp;
 
@@ -509,14 +561,11 @@ cluster_io(vp, upl, upl_offset, f_offset, size, flags, real_bp)
                                }
                                splx(s);
                        }
-                       vsize -= psize;
-                   }
                }
-               if (error)
-                       break;
-
-               if (flags & CL_ASYNC)
-                       cbp->b_iodone = (void *)cluster_iodone;
+               if (flags & CL_ASYNC) {
+                       cbp->b_flags |= (B_CALL | B_ASYNC);
+                       cbp->b_iodone = (void *)cluster_iodone;
+               }
                cbp->b_flags |= io_flags;
 
                cbp->b_lblkno = lblkno;
@@ -526,6 +575,13 @@ cluster_io(vp, upl, upl_offset, f_offset, size, flags, real_bp)
                cbp->b_uploffset = upl_offset;
                cbp->b_trans_next = (struct buf *)0;
 
+               if (cbp->b_iostate = (void *)iostate)
+                       /*
+                        * caller wants to track the state of this
+                        * io... bump the amount issued against this stream
+                        */
+                       iostate->io_issued += io_size;
+
                if (flags & CL_READ)
                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 26)) | DBG_FUNC_NONE,
                                     cbp->b_lblkno, cbp->b_blkno, upl_offset, io_size, 0);
@@ -541,33 +597,41 @@ cluster_io(vp, upl, upl_offset, f_offset, size, flags, real_bp)
                        cbp_tail = cbp;
                }
                (struct buf *)(cbp->b_trans_head) = cbp_head;
+               buf_count++;
 
                upl_offset += io_size;
                f_offset   += io_size;
                size       -= io_size;
 
-               if ( (!(upl_offset & PAGE_MASK) && !(flags & CL_DEV_MEMORY)) || size == 0) {
+               if ( (!(upl_offset & PAGE_MASK) && !(flags & CL_DEV_MEMORY) && ((flags & CL_ASYNC) || buf_count > 8)) || size == 0) {
                        /*
                         * if we have no more I/O to issue or
                         * the current I/O we've prepared fully
                         * completes the last page in this request
-                        * or it's been completed via a zero-fill
-                        * due to a 'hole' in the file
+                        * and it's either an ASYNC request or 
+                        * we've already accumulated more than 8 I/O's into
+                        * this transaction and it's not an I/O directed to 
+                        * special DEVICE memory
                         * then go ahead and issue the I/O
                         */
 start_io:              
-                       if (flags & CL_COMMIT)
-                               cbp_head->b_flags |= B_COMMIT_UPL;
-                       if (flags & CL_PAGEOUT)
-                               cbp_head->b_flags |= B_PAGEOUT;
-                       if (flags & CL_PAGEIN)
-                               cbp_head->b_flags |= B_PGIN;
-
                        if (real_bp) {
                                cbp_head->b_flags |= B_NEED_IODONE;
                                cbp_head->b_real_bp = real_bp;
-                       }
+                       } else
+                               cbp_head->b_real_bp = (struct buf *)NULL;
 
+                       if (size == 0) {
+                               /*
+                                * we're about to issue the last I/O for this upl
+                                * if this was a read to the eof and the eof doesn't
+                                * finish on a page boundary, then we need to zero-fill
+                                * the rest of the page....
+                                */
+                               cbp_head->b_validend = zero_offset;
+                       } else
+                               cbp_head->b_validend = 0;
+                         
                        for (cbp = cbp_head; cbp;) {
                                struct buf * cbp_next;
 
@@ -575,7 +639,7 @@ start_io:
                                        cbp->b_vp->v_numoutput++;
 
                                cbp_next = cbp->b_trans_next;
-
+                               
                                (void) VOP_STRATEGY(cbp);
                                cbp = cbp_next;
                        }
@@ -584,39 +648,66 @@ start_io:
                                        biowait(cbp);
 
                                if (error = cluster_iodone(cbp_head)) {
-                                       retval = error;
+                                       if ((flags & CL_PAGEOUT) && (error == ENXIO))
+                                               retval = 0;     /* drop the error */
+                                       else
+                                               retval = error;
                                        error  = 0;
                                }
                        }
                        cbp_head = (struct buf *)0;
                        cbp_tail = (struct buf *)0;
+
+                       buf_count = 0;
                }
        }
        if (error) {
                int abort_size;
 
+               io_size = 0;
+               
                for (cbp = cbp_head; cbp;) {
                        struct buf * cbp_next;
  
-                       if (cbp->b_vectorcount > 1)
-                               _FREE(cbp->b_vectorlist, M_SEGMENT);
                        upl_offset -= cbp->b_bcount;
                        size       += cbp->b_bcount;
+                       io_size    += cbp->b_bcount;
 
                        cbp_next = cbp->b_trans_next;
                        free_io_buf(cbp);
                        cbp = cbp_next;
                }
+               if (iostate) {
+                       /*
+                        * update the error condition for this stream
+                        * since we never really issued the io
+                        * just go ahead and adjust it back
+                        */
+                       if (iostate->io_error == 0)
+                               iostate->io_error = error;
+                       iostate->io_issued -= io_size;
+
+                       if (iostate->io_wanted) {
+                               /*
+                                * someone is waiting for the state of
+                                * this io stream to change
+                                */
+                               iostate->io_wanted = 0;
+                               wakeup((caddr_t)&iostate->io_wanted);
+                       }
+               }
                pg_offset  = upl_offset & PAGE_MASK;
                abort_size = ((size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE) * PAGE_SIZE;
 
                if (flags & CL_COMMIT) {
                        int upl_abort_code;
 
-                       if (flags & CL_PAGEOUT)
+                       if (flags & CL_PRESERVE)
+                               upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
+                       else if ((flags & CL_PAGEOUT) && (error != ENXIO)) /* transient error */
                                upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
                        else if (flags & CL_PAGEIN)
-                           upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
+                               upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
                        else
                                upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
 
@@ -624,7 +715,7 @@ start_io:
                                                upl_abort_code);
 
                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE,
-                                    upl, upl_offset - pg_offset, abort_size, error, 0);
+                                    (int)upl, upl_offset - pg_offset, abort_size, error, 0);
                }
                if (real_bp) {
                        real_bp->b_flags |= B_ERROR;
@@ -650,14 +741,8 @@ cluster_rd_prefetch(vp, f_offset, size, filesize, devblocksize)
        off_t         filesize;
        int           devblocksize;
 {
-       upl_t         upl;
-       upl_page_info_t *pl;
-       int           pages_in_upl;
-       int           start_pg;
-       int           last_pg;
-       int           last_valid;
-       int           io_size;
-
+       int           pages_to_fetch;
+       int           skipped_pages;
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_START,
                     (int)f_offset, size, (int)filesize, 0, 0);
@@ -667,108 +752,29 @@ cluster_rd_prefetch(vp, f_offset, size, filesize, devblocksize)
                             (int)f_offset, 0, 0, 0, 0);
                return(0);
        }
-       if (ubc_page_op(vp, f_offset, 0, 0, 0) == KERN_SUCCESS) {
-               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
-                            (int)f_offset, 0, 0, 0, 0);
-               return(1);
-       }
        if (size > (MAX_UPL_TRANSFER * PAGE_SIZE))
                size = MAX_UPL_TRANSFER * PAGE_SIZE;
        else
                size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
 
-       if ((off_t)size > (filesize - f_offset))
-               size = ((filesize - f_offset) + (devblocksize - 1)) & ~(devblocksize - 1);
+        if ((off_t)size > (filesize - f_offset))
+                size = filesize - f_offset;
        
-       pages_in_upl = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
-
-       ubc_create_upl(vp,
-                       f_offset,
-                       pages_in_upl * PAGE_SIZE,
-                               &upl, 
-                               &pl,
-                               UPL_FLAGS_NONE);
+       pages_to_fetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
 
-       if (upl == (upl_t) 0)
-               return(0);
-
-       /*
-        * scan from the beginning of the upl looking for the first
-        * non-valid page.... this will become the first page in
-        * the request we're going to make to 'cluster_io'... if all
-        * of the pages are valid, we won't call through to 'cluster_io'
-        */
-       for (start_pg = 0; start_pg < pages_in_upl; start_pg++) {
-               if (!upl_valid_page(pl, start_pg))
-                       break;
-       }
-
-       /*
-        * scan from the starting invalid page looking for a valid
-        * page before the end of the upl is reached, if we 
-        * find one, then it will be the last page of the request to
-        * 'cluster_io'
-        */
-       for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
-               if (upl_valid_page(pl, last_pg))
-                       break;
-       }
-
-       /*
-        * if we find any more free valid pages at the tail of the upl
-        * than update maxra accordingly....
-        */
-       for (last_valid = last_pg; last_valid < pages_in_upl; last_valid++) {
-               if (!upl_valid_page(pl, last_valid))
+       for (skipped_pages = 0; skipped_pages < pages_to_fetch; skipped_pages++) {
+               if (ubc_page_op(vp, f_offset, 0, 0, 0) != KERN_SUCCESS)
                        break;
+               f_offset += PAGE_SIZE;
+               size     -= PAGE_SIZE;
        }
-       if (start_pg < last_pg) {               
-               vm_offset_t   upl_offset;
-
-               /*
-                * we found a range of 'invalid' pages that must be filled
-                * 'size' has already been clipped to the LEOF
-                * make sure it's at least a multiple of the device block size
-                */
-               upl_offset = start_pg * PAGE_SIZE;
-               io_size    = (last_pg - start_pg) * PAGE_SIZE;
-
-               if ((upl_offset + io_size) > size) {
-                       io_size = size - upl_offset;
-
-                       KERNEL_DEBUG(0xd001000, upl_offset, size, io_size, 0, 0);
-               }
-               cluster_io(vp, upl, upl_offset, f_offset + upl_offset, io_size,
-                          CL_READ | CL_COMMIT | CL_ASYNC | CL_AGE, (struct buf *)0);
-       }
-       if (start_pg) {
-               /*
-                * start_pg of non-zero indicates we found some already valid pages
-                * at the beginning of the upl.... we need to release these without
-                * modifying there state
-                */
-               ubc_upl_abort_range(upl, 0, start_pg * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
-
-               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 50)) | DBG_FUNC_NONE,
-                            upl, 0, start_pg * PAGE_SIZE, 0, 0);
-       }
-       if (last_pg < pages_in_upl) {
-               /*
-                * the set of pages that we issued an I/O for did not extend all the
-                * way to the end of the upl... so just release them without modifying
-                * there state
-                */
-               ubc_upl_abort_range(upl, last_pg * PAGE_SIZE, (pages_in_upl - last_pg) * PAGE_SIZE,
-                               UPL_ABORT_FREE_ON_EMPTY);
-
-               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 50)) | DBG_FUNC_NONE,
-                            upl, last_pg * PAGE_SIZE, (pages_in_upl - last_pg) * PAGE_SIZE, 0, 0);
-       }
+       if (skipped_pages < pages_to_fetch)
+               advisory_read(vp, filesize, f_offset, size, devblocksize);
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
-                    (int)f_offset + (last_valid * PAGE_SIZE), 0, 0, 0, 0);
+                    (int)f_offset + (pages_to_fetch * PAGE_SIZE), skipped_pages, 0, 1, 0);
 
-       return(last_valid);
+       return (pages_to_fetch);
 }
 
 
@@ -795,7 +801,8 @@ cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize)
                return;
        }
 
-       if (vp->v_lastr == -1 || (b_lblkno != vp->v_lastr && b_lblkno != (vp->v_lastr + 1) && b_lblkno != (vp->v_maxra + 1))) {
+       if (vp->v_lastr == -1 || (b_lblkno != vp->v_lastr && b_lblkno != (vp->v_lastr + 1) &&
+                                (b_lblkno != (vp->v_maxra + 1) || vp->v_ralen == 0))) {
                vp->v_ralen = 0;
                vp->v_maxra = 0;
 
@@ -812,7 +819,7 @@ cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize)
                vp->v_ralen = min(max_pages, (e_lblkno + 1) - b_lblkno);
 
        if (e_lblkno < vp->v_maxra) {
-               if ((vp->v_maxra - e_lblkno) > (max_pages / 4)) {
+               if ((vp->v_maxra - e_lblkno) > max(max_pages / 16, 4)) {
 
                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
                                     vp->v_ralen, vp->v_maxra, vp->v_lastr, 2, 0);
@@ -822,16 +829,17 @@ cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize)
        r_lblkno = max(e_lblkno, vp->v_maxra) + 1;
        f_offset = (off_t)r_lblkno * PAGE_SIZE_64;
 
-       size_of_prefetch = cluster_rd_prefetch(vp, f_offset, vp->v_ralen * PAGE_SIZE, filesize, devblocksize);
-
-       if (size_of_prefetch)
-               vp->v_maxra = r_lblkno + (size_of_prefetch - 1);
+       if (f_offset < filesize) {
+               size_of_prefetch = cluster_rd_prefetch(vp, f_offset, vp->v_ralen * PAGE_SIZE, filesize, devblocksize);
 
+               if (size_of_prefetch)
+                       vp->v_maxra = (r_lblkno + size_of_prefetch) - 1;
+       }
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
                     vp->v_ralen, vp->v_maxra, vp->v_lastr, 3, 0);
 }
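The new back-off threshold above, max(max_pages / 16, 4), is easier to read with numbers plugged in; a sketch assuming max_pages == MAX_UPL_TRANSFER == 256 (an assumption about this era of xnu, not stated in the diff):

	int max_pages  = 256;
	int old_refill = max_pages / 4;            /* extend once unread lead <= 64 pages */
	int new_refill = max(max_pages / 16, 4);   /* extend once unread lead <= 16 pages */
	/*
	 * read-ahead is extended only when (v_maxra - e_lblkno) falls to the
	 * threshold, so the smaller value lets the current window drain
	 * further before another prefetch is issued
	 */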
 
-
+int
 cluster_pageout(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flags)
        struct vnode *vp;
        upl_t         upl;
@@ -852,9 +860,6 @@ cluster_pageout(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, fla
        if ((flags & UPL_NOCOMMIT) == 0) 
                local_flags |= CL_COMMIT;
 
-       if (upl == (upl_t) 0)
-               panic("cluster_pageout: can't handle NULL upl yet\n");
-
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 52)) | DBG_FUNC_NONE,
                     (int)f_offset, size, (int)filesize, local_flags, 0);
@@ -869,8 +874,7 @@ cluster_pageout(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, fla
 
         if (vp->v_mount->mnt_flag & MNT_RDONLY) {
                if (local_flags & CL_COMMIT)
-                       ubc_upl_abort_range(upl, upl_offset, size,
-                                               UPL_ABORT_FREE_ON_EMPTY);
+                       ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
                return (EROFS);
        }
        /*
@@ -890,7 +894,7 @@ cluster_pageout(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, fla
        if (size < max_size)
                io_size = size;
        else
-               io_size = (max_size + (devblocksize - 1)) & ~(devblocksize - 1);
+               io_size = max_size;
 
        pg_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
 
@@ -904,11 +908,11 @@ cluster_pageout(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, fla
                tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "cluster_pageout", 0);
        }
 
-       return (cluster_io(vp, upl, upl_offset, f_offset, io_size,
-                          local_flags, (struct buf *)0));
+       return (cluster_io(vp, upl, upl_offset, f_offset, io_size, devblocksize,
+                          local_flags, (struct buf *)0, (struct clios *)0));
 }
 
-
+int
 cluster_pagein(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flags)
        struct vnode *vp;
        upl_t         upl;
@@ -920,22 +924,19 @@ cluster_pagein(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flag
        int           flags;
 {
        u_int         io_size;
-       int           pg_size;
+       int           rounded_size;
         off_t         max_size;
        int           retval;
        int           local_flags = 0;
 
+       if (upl == NULL || size < 0)
+               panic("cluster_pagein: NULL upl passed in");
 
-       /*
-        * If they didn't ask for any data, then we are done...
-        * we can't issue an abort because we don't know how
-        * big the upl really is
-        */
-       if (size <= 0)
-               return (EINVAL);
-
+       if ((flags & UPL_IOSYNC) == 0)
+               local_flags |= CL_ASYNC;
        if ((flags & UPL_NOCOMMIT) == 0) 
-               local_flags = CL_COMMIT;
+               local_flags |= CL_COMMIT;
+
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 56)) | DBG_FUNC_NONE,
                     (int)f_offset, size, (int)filesize, local_flags, 0);
@@ -947,10 +948,9 @@ cluster_pagein(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flag
         * or the size requested isn't a multiple of PAGE_SIZE
         */
        if (f_offset < 0 || f_offset >= filesize ||
-          (f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) {
-               if (local_flags & CL_COMMIT)
-                       ubc_upl_abort_range(upl, upl_offset, size, 
-                                       UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
+          (f_offset & PAGE_MASK_64) || (size & PAGE_MASK) || (upl_offset & PAGE_MASK)) {
+               if (local_flags & CL_COMMIT)
+                       ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
                return (EINVAL);
        }
        max_size = filesize - f_offset;
@@ -958,32 +958,16 @@ cluster_pagein(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flag
        if (size < max_size)
                io_size = size;
        else
-               io_size = (max_size + (devblocksize - 1)) & ~(devblocksize - 1);
-
-       pg_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
-
-       if (upl == (upl_t) 0) {
-               ubc_create_upl( vp,
-                                               f_offset,
-                                               pg_size,
-                                               &upl,
-                                               NULL,
-                                               UPL_FLAGS_NONE);
+               io_size = max_size;
 
-               if (upl == (upl_t) 0)
-                       return (EINVAL);
-
-               upl_offset = (vm_offset_t)0;
-               size = pg_size;
-       }
-       if (size > pg_size) {
-               if (local_flags & CL_COMMIT)
-                       ubc_upl_abort_range(upl, upl_offset + pg_size, size - pg_size,
-                                       UPL_ABORT_FREE_ON_EMPTY);
-       }
+       rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
 
-       retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
-                           local_flags | CL_READ | CL_PAGEIN, (struct buf *)0);
+       if (size > rounded_size && (local_flags & CL_COMMIT))
+               ubc_upl_abort_range(upl, upl_offset + rounded_size,
+                                   size - (upl_offset + rounded_size), UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
+       
+       retval = cluster_io(vp, upl, upl_offset, f_offset, io_size, devblocksize,
+                          local_flags | CL_READ | CL_PAGEIN, (struct buf *)0, (struct clios *)0);
 
        if (retval == 0) {
                int b_lblkno;
@@ -993,7 +977,7 @@ cluster_pagein(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flag
                e_lblkno = (int)
                        ((f_offset + ((off_t)io_size - 1)) / PAGE_SIZE_64);
 
-               if (!(flags & UPL_NORDAHEAD) && !(vp->v_flag & VRAOFF)) {
+               if (!(flags & UPL_NORDAHEAD) && !(vp->v_flag & VRAOFF) && rounded_size == PAGE_SIZE) {
                        /*
                         * we haven't read the last page in of the file yet
                         * so let's try to read ahead if we're in 
@@ -1006,26 +990,29 @@ cluster_pagein(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flag
        return (retval);
 }
 
-
+int
 cluster_bp(bp)
        struct buf *bp;
 {
         off_t  f_offset;
        int    flags;
 
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 19)) | DBG_FUNC_START,
+                    (int)bp, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
+
        if (bp->b_pagelist == (upl_t) 0)
                panic("cluster_bp: can't handle NULL upl yet\n");
        if (bp->b_flags & B_READ)
-               flags = CL_ASYNC | CL_NOMAP | CL_READ;
+               flags = CL_ASYNC | CL_READ;
        else
-               flags = CL_ASYNC | CL_NOMAP;
+               flags = CL_ASYNC;
 
        f_offset = ubc_blktooff(bp->b_vp, bp->b_lblkno);
 
-        return (cluster_io(bp->b_vp, bp->b_pagelist, 0, f_offset, bp->b_bcount, flags, bp));
+        return (cluster_io(bp->b_vp, bp->b_pagelist, 0, f_offset, bp->b_bcount, 0, flags, bp, (struct clios *)0));
 }
 
-
+int
 cluster_write(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
        struct vnode *vp;
        struct uio   *uio;
@@ -1049,7 +1036,7 @@ cluster_write(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
        int           retval = 0;
 
 
-       if ((!uio) || (uio->uio_segflg != UIO_USERSPACE) || (!(vp->v_flag & VNOCACHE_DATA)))
+       if ( (!(vp->v_flag & VNOCACHE_DATA)) || (!uio) || (uio->uio_segflg != UIO_USERSPACE))
          {
            retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
            return(retval);
@@ -1086,14 +1073,6 @@ cluster_write(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
 
             if (upl_flags & UPL_PHYS_CONTIG)
              {
-               /*
-                * since the interface to the IOKit below us uses physical block #'s and
-                * block counts to specify the I/O, we can't handle anything that isn't
-                * devblocksize aligned 
-                */
-               if ((uio->uio_offset & (devblocksize - 1)) || (uio->uio_resid & (devblocksize - 1)))
-                   return(EINVAL);
-
                if (flags & IO_HEADZEROFILL)
                  {
                    flags &= ~IO_HEADZEROFILL;
@@ -1102,7 +1081,7 @@ cluster_write(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                        return(retval);
                  }
 
-               retval = cluster_phys_write(vp, uio);
+               retval = cluster_phys_write(vp, uio, newEOF, devblocksize, flags);
 
                if (uio->uio_resid == 0 && (flags & IO_TAILZEROFILL))
                  {
@@ -1184,7 +1163,8 @@ cluster_write(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
        return(retval);
 }
 
-static
+
+static int
 cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags)
        struct vnode *vp;
        struct uio   *uio;
@@ -1198,6 +1178,7 @@ cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags)
        vm_offset_t      upl_offset;
        off_t            max_io_size;
        int              io_size;
+       int              io_flag;
        int              upl_size;
        int              upl_needed_size;
        int              pages_in_pl;
@@ -1205,8 +1186,10 @@ cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags)
        kern_return_t    kret;
        struct iovec     *iov;
        int              i;
+       int              first = 1;
        int              force_data_sync;
        int              error  = 0;
+       struct clios     iostate;
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
                     (int)uio->uio_offset, (int)uio->uio_resid, 
@@ -1218,152 +1201,184 @@ cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags)
         *  -- the resid is a page multiple
         *  -- the resid will not exceed iov_len
         */
+       cluster_try_push(vp, newEOF, 0, 1);
+
+       iostate.io_completed = 0;
+       iostate.io_issued = 0;
+       iostate.io_error = 0;
+       iostate.io_wanted = 0;
 
        iov = uio->uio_iov;
 
        while (uio->uio_resid && uio->uio_offset < newEOF && error == 0) {
-         io_size = uio->uio_resid;
-
-          if (io_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
-            io_size = MAX_UPL_TRANSFER * PAGE_SIZE;
+               io_size = uio->uio_resid;
 
-         upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK_64;
-         upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
+               if (io_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
+                       io_size = MAX_UPL_TRANSFER * PAGE_SIZE;
 
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
-                      (int)upl_offset, upl_needed_size, iov->iov_base, io_size, 0);
-
-         for (force_data_sync = 0; force_data_sync < 3; force_data_sync++)
-           {
-             pages_in_pl = 0;
-             upl_size = upl_needed_size;
-             upl_flags = UPL_COPYOUT_FROM | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL;
-
-             kret = vm_map_get_upl(current_map(),
-                                   (vm_offset_t)iov->iov_base & ~PAGE_MASK,
-                                   &upl_size,
-                                       &upl, 
-                                       NULL, 
-                                       &pages_in_pl,
-                                       &upl_flags,
-                                       force_data_sync);
+               if (first) {
+                       if (io_size > (MAX_UPL_TRANSFER * PAGE_SIZE) / 4)
+                               io_size = (MAX_UPL_TRANSFER * PAGE_SIZE) / 8;
+                       first = 0;
+               }
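               /*
                * (illustrative note, not part of this change: with
                * MAX_UPL_TRANSFER == 256 and 4K pages, a first request larger
                * than 256K is clamped to 128K, presumably so the initial
                * async write completes sooner and overlaps preparation of
                * the next one)
                */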
+               upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK_64;
+               upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
+
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
+                            (int)upl_offset, upl_needed_size, (int)iov->iov_base, io_size, 0);
+
+               for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
+                       pages_in_pl = 0;
+                       upl_size = upl_needed_size;
+                       upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
+                                   UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL;
+
+                       kret = vm_map_get_upl(current_map(),
+                                             (vm_offset_t)iov->iov_base & ~PAGE_MASK,
+                                             &upl_size,
+                                             &upl, 
+                                             NULL, 
+                                             &pages_in_pl,
+                                             &upl_flags,
+                                             force_data_sync);
+
+                       if (kret != KERN_SUCCESS) {
+                               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
+                                            0, 0, 0, kret, 0);
 
-             if (kret != KERN_SUCCESS)
-               {
-                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
-                              0, 0, 0, kret, 0);
+                               /*
+                                * cluster_nocopy_write: failed to get pagelist
+                                *
+                                * we may have already spun some portion of this request
+                                * off as async requests... we need to wait for the I/O
+                                * to complete before returning
+                                */
+                               goto wait_for_writes;
+                       }
+                       pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
+                       pages_in_pl = upl_size / PAGE_SIZE;
 
-                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
-                              (int)uio->uio_offset, (int)uio->uio_resid, kret, 1, 0);
+                       for (i = 0; i < pages_in_pl; i++) {
+                               if (!upl_valid_page(pl, i))
+                                       break;            
+                       }
+                       if (i == pages_in_pl)
+                               break;
 
-                 /* cluster_nocopy_write: failed to get pagelist */
-                 /* do not return kret here */
-                 return(0);
+                       /*
+                        * didn't get all the pages back that we
+                        * needed... release this upl and try again
+                        */
+                       ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
+                                           UPL_ABORT_FREE_ON_EMPTY);
                }
+               if (force_data_sync >= 3) {
+                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
+                                    i, pages_in_pl, upl_size, kret, 0);
 
-             pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
-             pages_in_pl = upl_size / PAGE_SIZE;
-
-             for(i=0; i < pages_in_pl; i++)
-               {
-                 if (!upl_valid_page(pl, i))
-                   break;                
+                       /*
+                        * for some reason, we couldn't acquire a hold on all
+                        * the pages needed in the user's address space
+                        *
+                        * we may have already spun some portion of this request
+                        * off as async requests... we need to wait for the I/O
+                        * to complete before returning
+                        */
+                       goto wait_for_writes;
                }
 
-             if (i == pages_in_pl)
-               break;
-
-               ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
-                               UPL_ABORT_FREE_ON_EMPTY);
-           }
-
-         if (force_data_sync >= 3)
-           {
-             KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
-                          i, pages_in_pl, upl_size, kret, 0);
-
-             KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
-                          (int)uio->uio_offset, (int)uio->uio_resid, kret, 2, 0);
-             return(0);
-           }
-
-         /*
-          * Consider the possibility that upl_size wasn't satisfied.
-          */
-         if (upl_size != upl_needed_size)
-           io_size = (upl_size - (int)upl_offset) & ~PAGE_MASK;
-
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
-                      (int)upl_offset, upl_size, iov->iov_base, io_size, 0);                  
-
-         if (io_size == 0)
-           {
-             ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
-                                  UPL_ABORT_FREE_ON_EMPTY);
-             KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
-                    (int)uio->uio_offset, uio->uio_resid, 0, 3, 0);
-
-             return(0);
-           }
+               /*
+                * Consider the possibility that upl_size wasn't satisfied.
+                */
+               if (upl_size != upl_needed_size)
+                       io_size = (upl_size - (int)upl_offset) & ~PAGE_MASK;
 
-         /*
-          * Now look for pages already in the cache
-          * and throw them away.
-          */
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
+                            (int)upl_offset, upl_size, (int)iov->iov_base, io_size, 0);                       
 
-         upl_f_offset = uio->uio_offset;   /* this is page aligned in the file */
-         max_io_size = io_size;
+               if (io_size == 0) {
+                       ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
+                                           UPL_ABORT_FREE_ON_EMPTY);
 
-         while (max_io_size) {
+                       /*
+                        * we may have already spun some portion of this request
+                        * off as async requests... we need to wait for the I/O
+                        * to complete before returning
+                        */
+                       goto wait_for_writes;
+               }
+               /*
+                * Now look for pages already in the cache
+                * and throw them away.
+                */
 
-           /*
-            * Flag UPL_POP_DUMP says if the page is found
-            * in the page cache it must be thrown away.
-            */
-           ubc_page_op(vp, 
-                       upl_f_offset,
-                       UPL_POP_SET | UPL_POP_BUSY | UPL_POP_DUMP,
-                       0, 0);
-           max_io_size  -= PAGE_SIZE;
-           upl_f_offset += PAGE_SIZE;
-         }
+               upl_f_offset = uio->uio_offset;   /* this is page aligned in the file */
+               max_io_size = io_size;
 
-         /*
-          * issue a synchronous write to cluster_io
-          */
+               while (max_io_size) {
+                       /*
+                        * Flag UPL_POP_DUMP says if the page is found
+                        * in the page cache it must be thrown away.
+                        */
+                       ubc_page_op(vp, 
+                                   upl_f_offset,
+                                   UPL_POP_SET | UPL_POP_BUSY | UPL_POP_DUMP,
+                                   0, 0);
+                       max_io_size  -= PAGE_SIZE_64;
+                       upl_f_offset += PAGE_SIZE_64;
+               }
+               /*
+                * we want to push out these writes asynchronously so that we can overlap
+                * the preparation of the next I/O
+                * if there are already too many outstanding writes
+                * wait until some complete before issuing the next
+                */
+               while ((iostate.io_issued - iostate.io_completed) > (2 * MAX_UPL_TRANSFER * PAGE_SIZE)) {
+                       iostate.io_wanted = 1;
+                       tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "cluster_nocopy_write", 0);
+               }       
+               if (iostate.io_error) {
+                       /*
+                        * one of the earlier writes we issued ran into a hard error
+                        * don't issue any more writes, cleanup the UPL
+                        * that was just created but not used, then
+                        * go wait for all writes that are part of this stream
+                        * to complete before returning the error to the caller
+                        */
+                       ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
+                                           UPL_ABORT_FREE_ON_EMPTY);
 
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
-                      (int)upl_offset, (int)uio->uio_offset, io_size, 0, 0);
+                       goto wait_for_writes;
+               }
+               io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT;
 
-         error = cluster_io(vp, upl, upl_offset, uio->uio_offset,
-                            io_size, 0, (struct buf *)0);
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
+                            (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);
 
-         if (error == 0) {
-           /*
-            * The cluster_io write completed successfully,
-            * update the uio structure.
-            */
-           iov->iov_base += io_size;
-           iov->iov_len -= io_size;
-           uio->uio_resid -= io_size;
-           uio->uio_offset += io_size;
-         }
-         /*
-          * always 'commit' the I/O via the abort primitive whether the I/O
-          * succeeded cleanly or not... this is necessary to insure that 
-          * we preserve the state of the DIRTY flag on the pages used to
-          * provide the data for the I/O... the state of this flag SHOULD
-          * NOT be changed by a write
-          */
-         ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
-                             UPL_ABORT_FREE_ON_EMPTY);
+               error = cluster_io(vp, upl, upl_offset, uio->uio_offset,
+                                  io_size, devblocksize, io_flag, (struct buf *)0, &iostate);
 
+               iov->iov_len    -= io_size;
+               iov->iov_base   += io_size;
+               uio->uio_resid  -= io_size;
+               uio->uio_offset += io_size;
 
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
-                      (int)upl_offset, (int)uio->uio_offset, (int)uio->uio_resid, error, 0);
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
+                            (int)upl_offset, (int)uio->uio_offset, (int)uio->uio_resid, error, 0);
 
        } /* end while */
 
+wait_for_writes:
+       /*
+        * make sure all async writes issued as part of this stream
+        * have completed before we return
+        */
+       while (iostate.io_issued != iostate.io_completed) {
+               iostate.io_wanted = 1;
+               tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "cluster_nocopy_write", 0);
+       }       
+       if (iostate.io_error)
+               error = iostate.io_error;
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
                     (int)uio->uio_offset, (int)uio->uio_resid, error, 4, 0);
@@ -1371,13 +1386,20 @@ cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags)
        return (error);
 }
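
The async writes issued in the loop above are throttled through the
struct clios counters: the issuer charges io_issued before queueing an
I/O and tsleep()s on &iostate.io_wanted once the in-flight window
(io_issued - io_completed) exceeds two maximal UPLs; the completion
path credits io_completed, latches the first error, and wakes any
sleeper. A minimal sketch of that handshake, assuming a completion
callback of roughly this shape (the helper names are illustrative;
only the clios fields come from this file):

        /* sketch only -- neither helper exists under these names */
        static void
        clios_charge(struct clios *iostate, u_int bytes)
        {
                /* charged by the issuer before the async I/O is queued */
                iostate->io_issued += bytes;
        }

        static void
        clios_credit(struct clios *iostate, u_int bytes, int error)
        {
                /* credited from the I/O completion path */
                iostate->io_completed += bytes;

                if (error && iostate->io_error == 0)
                        iostate->io_error = error;   /* keep only the first error */

                if (iostate->io_wanted) {
                        /* the issuer is throttled in tsleep... wake it */
                        iostate->io_wanted = 0;
                        wakeup((caddr_t)&iostate->io_wanted);
                }
        }

Because the issuer re-checks the window and iostate.io_error at the top
of every iteration, a wakeup that races with a new charge simply puts
the issuer back to sleep on the next pass.
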
 
-static
-cluster_phys_write(vp, uio)
+
+static int
+cluster_phys_write(vp, uio, newEOF, devblocksize, flags)
        struct vnode *vp;
        struct uio   *uio;
+       off_t        newEOF;
+       int          devblocksize;
+       int          flags;
 {
+       upl_page_info_t *pl;
+       addr64_t             src_paddr;
        upl_t            upl;
        vm_offset_t      upl_offset;
+       int              tail_size;
        int              io_size;
        int              upl_size;
        int              upl_needed_size;
@@ -1392,6 +1414,7 @@ cluster_phys_write(vp, uio)
         *  -- the resid will not exceed iov_len
         *  -- the vector target address is physically contiguous
         */
+       cluster_try_push(vp, newEOF, 0, 1);
 
        iov = uio->uio_iov;
        io_size = iov->iov_len;
@@ -1400,56 +1423,86 @@ cluster_phys_write(vp, uio)
 
        pages_in_pl = 0;
        upl_size = upl_needed_size;
-       upl_flags = UPL_COPYOUT_FROM | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL;
+       upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC | 
+                   UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL;
 
        kret = vm_map_get_upl(current_map(),
                              (vm_offset_t)iov->iov_base & ~PAGE_MASK,
                              &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, 0);
 
-       if (kret != KERN_SUCCESS)
-         {
-           /* cluster_phys_write: failed to get pagelist */
-             /* note: return kret here */
+       if (kret != KERN_SUCCESS) {
+               /*
+                * cluster_phys_write: failed to get pagelist
+                * note: return kret here
+                * note: we return EINVAL here rather than kret
              return(EINVAL);
-         }
-
+       }
        /*
         * Consider the possibility that upl_size wasn't satisfied.
         * This is a failure in the physical memory case.
         */
-       if (upl_size < upl_needed_size)
-         {
-           kernel_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
-           return(EINVAL);
-         }
+       if (upl_size < upl_needed_size) {
+               kernel_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
+               return(EINVAL);
+       }
+       pl = ubc_upl_pageinfo(upl);
 
-       /*
-        * issue a synchronous write to cluster_io
-        */
+       src_paddr = (((addr64_t)(int)upl_phys_page(pl, 0)) << 12) + ((addr64_t)iov->iov_base & PAGE_MASK);
+
+       while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
+               int   head_size;
+
+               head_size = devblocksize - (int)(uio->uio_offset & (devblocksize - 1));
+
+               if (head_size > io_size)
+                       head_size = io_size;
+
+               error = cluster_align_phys_io(vp, uio, src_paddr, head_size, devblocksize, 0);
+
+               if (error) {
+                       ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
 
-       error = cluster_io(vp, upl, upl_offset, uio->uio_offset,
-                          io_size, CL_DEV_MEMORY, (struct buf *)0);
+                       return(EINVAL);
+               }
+               upl_offset += head_size;
+               src_paddr  += head_size;
+               io_size    -= head_size;
+       }
+       tail_size = io_size & (devblocksize - 1);
+       io_size  -= tail_size;
 
+       if (io_size) {
+               /*
+                * issue a synchronous write to cluster_io
+                */
+               error = cluster_io(vp, upl, upl_offset, uio->uio_offset,
+                                  io_size, 0, CL_DEV_MEMORY, (struct buf *)0, (struct clios *)0);
+       }
        if (error == 0) {
-         /*
-          * The cluster_io write completed successfully,
-          * update the uio structure and commit.
-          */
-
-         ubc_upl_commit_range(upl, 0, upl_size, UPL_COMMIT_FREE_ON_EMPTY);
-           
-         iov->iov_base += io_size;
-         iov->iov_len -= io_size;
-         uio->uio_resid -= io_size;
-         uio->uio_offset += io_size;
+               /*
+                * The cluster_io write completed successfully,
+                * update the uio structure
+                */
+               uio->uio_resid  -= io_size;
+               iov->iov_len    -= io_size;
+               iov->iov_base   += io_size;
+               uio->uio_offset += io_size;
+               src_paddr       += io_size;
+
+               if (tail_size)
+                       error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, devblocksize, 0);
        }
-       else
-         ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
+       /*
+        * just release our hold on the physically contiguous
+        * region without changing any state
+        */
+       ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
 
        return (error);
 }
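
The head/tail handling above is plain modular arithmetic against
devblocksize: peel off the bytes up to the next device block boundary,
hand the block-multiple middle to cluster_io() in one shot, and leave
the sub-block remainder for cluster_align_phys_io(). A self-contained
sketch of that split (split_extent is a hypothetical helper, not kernel
code):

        /* hypothetical illustration of the devblocksize split above */
        static void
        split_extent(off_t offset, int size, int devblocksize,
                     int *head_size, int *body_size, int *tail_size)
        {
                *head_size = 0;

                if (offset & (devblocksize - 1)) {
                        /* bytes needed to reach the next device block boundary */
                        *head_size = devblocksize - (int)(offset & (devblocksize - 1));
                        if (*head_size > size)
                                *head_size = size;
                        size -= *head_size;
                }
                *tail_size = size & (devblocksize - 1);   /* partial trailing block */
                *body_size = size - *tail_size;           /* block-aligned middle */
        }

For a 2000 byte write at offset 1000 with 512 byte device blocks this
yields a 24 byte head, a 1536 byte body, and a 440 byte tail, which is
exactly how the alignment loop and the tail_size computation above
carve the I/O.
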
 
-static
+
+static int
 cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
        struct vnode *vp;
        struct uio   *uio;
@@ -1469,7 +1522,6 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
        int              start_offset;
        int              xfer_resid;
        int              io_size;
-       int              io_size_before_rounding;
        int              io_flags;
        vm_offset_t      io_address;
        int              io_offset;
@@ -1585,7 +1637,7 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                        panic("cluster_write: failed to get pagelist");
 
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_NONE,
-                       upl, (int)upl_f_offset, upl_size, start_offset, 0);
+                       (int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
 
                if (start_offset && !upl_valid_page(pl, 0)) {
                        int   read_size;
@@ -1597,12 +1649,11 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                         */
                        read_size = PAGE_SIZE;
 
-                       if ((upl_f_offset + read_size) > newEOF) {
+                       if ((upl_f_offset + read_size) > newEOF)
                                read_size = newEOF - upl_f_offset;
-                               read_size = (read_size + (devblocksize - 1)) & ~(devblocksize - 1);
-                       }
-                       retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
-                                           CL_READ, (struct buf *)0);
+
+                       retval = cluster_io(vp, upl, 0, upl_f_offset, read_size, devblocksize,
+                                           CL_READ, (struct buf *)0, (struct clios *)0);
                        if (retval) {
                                /*
                                 * we had an error during the read which causes us to abort
@@ -1611,10 +1662,10 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                                 * their state and mark the failed page in error
                                 */
                                ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES);
-                               ubc_upl_abort(upl, 0);
+                               ubc_upl_abort_range(upl, 0, upl_size,  UPL_ABORT_FREE_ON_EMPTY);
 
                                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
-                                            upl, 0, 0, retval, 0);
+                                            (int)upl, 0, 0, retval, 0);
                                break;
                        }
                }
@@ -1632,12 +1683,11 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
 
                                read_size = PAGE_SIZE;
 
-                               if ((upl_f_offset + upl_offset + read_size) > newEOF) {
+                               if ((upl_f_offset + upl_offset + read_size) > newEOF)
                                        read_size = newEOF - (upl_f_offset + upl_offset);
-                                       read_size = (read_size + (devblocksize - 1)) & ~(devblocksize - 1);
-                               }
-                               retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
-                                                   CL_READ, (struct buf *)0);
+
+                               retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size, devblocksize,
+                                                   CL_READ, (struct buf *)0, (struct clios *)0);
                                if (retval) {
                                        /*
                                         * we had an error during the read which causes us to abort
@@ -1645,12 +1695,11 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                                         * need to release the rest of the pages in the upl without
                                  * modifying their state and mark the failed page in error
                                         */
-                                       ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE,
-                                                       UPL_ABORT_DUMP_PAGES);
-                                       ubc_upl_abort(upl, 0);
+                                       ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES);
+                                       ubc_upl_abort_range(upl, 0,          upl_size,  UPL_ABORT_FREE_ON_EMPTY);
 
                                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
-                                                    upl, 0, 0, retval, 0);
+                                                    (int)upl, 0, 0, retval, 0);
                                        break;
                                }
                        }
@@ -1667,21 +1716,32 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                        else
                                bytes_to_zero = xfer_resid;
 
-                       if ( !(flags & IO_NOZEROVALID)) {
+                       if ( !(flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {
                                bzero((caddr_t)(io_address + io_offset), bytes_to_zero);
 
                                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
                                             (int)upl_f_offset + io_offset, bytes_to_zero,
-                                            (int)zero_cnt, xfer_resid, 0);
+                                            (int)io_offset, xfer_resid, 0);
                        } else {
+                               int zero_pg_index;
+
                                bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64));
+                               zero_pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64);
+
+                               if ( !upl_valid_page(pl, zero_pg_index)) {
+                                       bzero((caddr_t)(io_address + io_offset), bytes_to_zero); 
 
-                               if ( !upl_valid_page(pl, (int)(zero_off / PAGE_SIZE_64))) {
+                                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
+                                                    (int)upl_f_offset + io_offset, bytes_to_zero,
+                                                    (int)io_offset, xfer_resid, 0);
+
+                               } else if ((flags & (IO_NOZERODIRTY | IO_NOZEROVALID)) == IO_NOZERODIRTY &&
+                                          !upl_dirty_page(pl, zero_pg_index)) {
                                        bzero((caddr_t)(io_address + io_offset), bytes_to_zero); 
 
                                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
                                                     (int)upl_f_offset + io_offset, bytes_to_zero,
-                                                    (int)zero_cnt, xfer_resid, 0);
+                                                    (int)io_offset, xfer_resid, 0);
                                }
                        }
                        xfer_resid -= bytes_to_zero;
@@ -1697,13 +1757,15 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
 
                        retval = uiomove((caddr_t)(io_address + io_offset), bytes_to_move, uio);
 
+
                        if (retval) {
                                if ((kret = ubc_upl_unmap(upl)) != KERN_SUCCESS)
                                        panic("cluster_write: kernel_upl_unmap failed\n");
-                               ubc_upl_abort(upl, UPL_ABORT_DUMP_PAGES);
+
+                               ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
 
                                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
-                                            upl, 0, 0, retval, 0);
+                                            (int)upl, 0, 0, retval, 0);
                        } else {
                                uio_resid  -= bytes_to_move;
                                xfer_resid -= bytes_to_move;
@@ -1717,20 +1779,32 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                        else
                                bytes_to_zero = xfer_resid;
 
-                       if ( !(flags & IO_NOZEROVALID)) {
+                       if ( !(flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {
                                bzero((caddr_t)(io_address + io_offset), bytes_to_zero);
 
                                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
                                             (int)upl_f_offset + io_offset,
-                                            bytes_to_zero, (int)zero_cnt1, xfer_resid, 0);
+                                            bytes_to_zero, (int)io_offset, xfer_resid, 0);
                        } else {
+                               int zero_pg_index;
+                       
                                bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off1 & PAGE_MASK_64));
-                               if ( !upl_valid_page(pl, (int)(zero_off1 / PAGE_SIZE_64))) {
+                               zero_pg_index = (int)((zero_off1 - upl_f_offset) / PAGE_SIZE_64);
+
+                               if ( !upl_valid_page(pl, zero_pg_index)) {
+                                       bzero((caddr_t)(io_address + io_offset), bytes_to_zero);
+
+                                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
+                                                    (int)upl_f_offset + io_offset,
+                                                    bytes_to_zero, (int)io_offset, xfer_resid, 0);
+
+                               } else if ((flags & (IO_NOZERODIRTY | IO_NOZEROVALID)) == IO_NOZERODIRTY &&
+                                          !upl_dirty_page(pl, zero_pg_index)) {
                                        bzero((caddr_t)(io_address + io_offset), bytes_to_zero);
 
                                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
                                                     (int)upl_f_offset + io_offset,
-                                                    bytes_to_zero, (int)zero_cnt1, xfer_resid, 0);
+                                                    bytes_to_zero, (int)io_offset, xfer_resid, 0);
                                }
                        }
                        xfer_resid -= bytes_to_zero;
@@ -1740,12 +1814,12 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                }
 
                if (retval == 0) {
-                       int must_push;
+                       int cl_index;
                        int can_delay;
 
                        io_size += start_offset;
 
-                       if ((upl_f_offset + io_size) == newEOF && io_size < upl_size) {
+                       if ((upl_f_offset + io_size) >= newEOF && io_size < upl_size) {
                                /*
                                 * if we're extending the file with this write
                                 * we'll zero fill the rest of the page so that
@@ -1761,138 +1835,181 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                        if ((kret = ubc_upl_unmap(upl)) != KERN_SUCCESS)
                                panic("cluster_write: kernel_upl_unmap failed\n");
 
-                       io_size_before_rounding = io_size;
-
-                       if (io_size & (devblocksize - 1))
-                               io_size = (io_size + (devblocksize - 1)) & ~(devblocksize - 1);
+                       if (flags & IO_SYNC)
+                               /*
+                                * if the IO_SYNC flag is set then we need to
+                                * bypass any clusters and immediately issue
+                                * the I/O
+                                */
+                               goto issue_io;
 
-                       must_push = 0;
-                       can_delay = 0;
+                       if (vp->v_clen == 0)
+                               /*
+                                * no clusters currently present
+                                */
+                               goto start_new_cluster;
 
-                       if (vp->v_clen) {
-                               int newsize;
+                       /*
+                        * keep track of the overall dirty page
+                        * range we've developed
+                        * in case we have to fall back to the
+                        * VHASDIRTY method of flushing
+                        */
+                       if (vp->v_flag & VHASDIRTY)
+                               goto delay_io;
 
+                       for (cl_index = 0; cl_index < vp->v_clen; cl_index++) {
                                /*
                                 * we have an existing cluster... see if this write will extend it nicely
                                 */
-                               if (start_blkno >= vp->v_cstart) {
-                                       if (last_blkno <= (vp->v_cstart + vp->v_clen)) {
+                               if (start_blkno >= vp->v_clusters[cl_index].start_pg) {
+                                       /*
+                                        * the current write starts at or after the current cluster
+                                        */
+                                       if (last_blkno <= (vp->v_clusters[cl_index].start_pg + MAX_UPL_TRANSFER)) {
                                                /*
                                                 * we have a write that fits entirely
                                                 * within the existing cluster limits
                                                 */
-                                               if (last_blkno >= vp->v_lastw) {
+                                               if (last_blkno > vp->v_clusters[cl_index].last_pg)
                                                        /*
-                                                        * if we're extending the dirty region within the cluster
-                                                        * we need to update the cluster info... we check for blkno
-                                                        * equality because we may be extending the file with a 
-                                                        * partial write.... this in turn changes our idea of how
-                                                        * much data to write out (v_ciosiz) for the last page
+                                                        * update our idea of where the cluster ends
                                                         */
-                                                       vp->v_lastw = last_blkno;
-                                                       newsize = io_size + ((start_blkno - vp->v_cstart) * PAGE_SIZE);
-
-                                                       if (newsize > vp->v_ciosiz)
-                                                               vp->v_ciosiz = newsize;
-                                               }
-                                               can_delay = 1;
-                                               goto finish_io;
+                                                       vp->v_clusters[cl_index].last_pg = last_blkno;
+                                               break;
                                        }
-                                       if (start_blkno < (vp->v_cstart + vp->v_clen)) {
+                                       if (start_blkno < (vp->v_clusters[cl_index].start_pg + MAX_UPL_TRANSFER)) {
                                                /*
                                                 * we have a write that starts in the middle of the current cluster
                                                 * but extends beyond the cluster's limit
                                                 * we'll clip the current cluster if we actually
-                                                * overlap with the new write and then push it out
+                                                * overlap with the new write
                                                 * and start a new cluster with the current write
                                                 */
-                                                if (vp->v_lastw > start_blkno) {
-                                                       vp->v_lastw = start_blkno;
-                                                       vp->v_ciosiz = (vp->v_lastw - vp->v_cstart) * PAGE_SIZE;
-                                                }
+                                                if (vp->v_clusters[cl_index].last_pg > start_blkno)
+                                                       vp->v_clusters[cl_index].last_pg = start_blkno;
                                        }
                                        /*
                                         * we also get here for the case where the current write starts
                                         * beyond the limit of the existing cluster
+                                        *
+                                        * in either case, we'll check the remaining clusters before 
+                                        * starting a new one
                                         */
-                                       must_push = 1;
-                                       goto check_delay;
-                               }
-                               /*
-                                * the current write starts in front of the current cluster
-                                */
-                               if (last_blkno > vp->v_cstart) {
+                               } else {
                                        /*
-                                        * the current write extends into the existing cluster
+                                        * the current write starts in front of the current cluster
                                         */
-                                       if ((vp->v_lastw - start_blkno) > vp->v_clen) {
+                                       if ((vp->v_clusters[cl_index].last_pg - start_blkno) <=  MAX_UPL_TRANSFER) {
                                                /*
-                                                * if we were to combine this write with the current cluster
-                                                * we would exceed the cluster size limit....
-                                                * clip the current cluster by moving the start position
-                                                * to where the current write ends, and then push it
-                                                */
-                                               vp->v_ciosiz -= (last_blkno - vp->v_cstart) * PAGE_SIZE;
-                                               vp->v_cstart = last_blkno;
-
-                                               /*
-                                                * round up the io_size to the nearest page size
-                                                * since we've coalesced with at least 1 pre-existing
-                                                * page in the current cluster... this write may have ended in the
-                                                * middle of the page which would cause io_size to give us an
-                                                * inaccurate view of how much I/O we actually need to do
+                                                * we can just merge the old cluster
+                                                * with the new request and leave it
+                                                * in the cache
                                                 */
-                                               io_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
+                                               vp->v_clusters[cl_index].start_pg = start_blkno;
 
-                                               must_push = 1;
-                                               goto check_delay;
+                                               if (last_blkno > vp->v_clusters[cl_index].last_pg) {
+                                                       /*
+                                                        * the current write completely
+                                                        * envelops the existing cluster
+                                                        */
+                                                       vp->v_clusters[cl_index].last_pg = last_blkno;
+                                               }
+                                               break;
                                        }
+
                                        /*
-                                        * we can coalesce the current write with the existing cluster
-                                        * adjust the cluster info to reflect this
+                                        * if we were to combine this write with the current cluster
+                                        * we would exceed the cluster size limit.... so,
+                                        * let's see if there's any overlap of the new I/O with
+                                        * the existing cluster...
+                                        * 
                                         */
-                                       if (last_blkno > vp->v_lastw) {
+                                       if (last_blkno > vp->v_clusters[cl_index].start_pg)
                                                /*
-                                                * the current write completey overlaps
-                                                * the existing cluster
+                                                * the current write extends into the existing cluster
+                                                * clip the current cluster by moving the start position
+                                                * to where the current write ends
                                                 */
-                                               vp->v_lastw = last_blkno;
-                                               vp->v_ciosiz = io_size;
-                                       } else {
-                                               vp->v_ciosiz += (vp->v_cstart - start_blkno) * PAGE_SIZE;
-
-                                               if (io_size > vp->v_ciosiz)
-                                                       vp->v_ciosiz = io_size;
-                                       }
-                                       vp->v_cstart = start_blkno;
-                                       can_delay = 1;
-                                       goto finish_io;
+                                               vp->v_clusters[cl_index].start_pg = last_blkno;
+                                       /*
+                                        * if we get here, there was no way to merge
+                                        * the new I/O with this cluster and
+                                        * keep it under our maximum cluster length...
+                                        * we'll check the remaining clusters before starting a new one
+                                        */
                                }
-                               /*
-                                * this I/O range is entirely in front of the current cluster
-                                * so we need to push the current cluster out before beginning
+                       }
+                       if (cl_index < vp->v_clen)
+                               /*
+                                * we found an existing cluster that we
+                                * could merge this I/O into
+                                */
+                               goto delay_io;
+
+                       if (vp->v_clen < MAX_CLUSTERS && !(vp->v_flag & VNOCACHE_DATA))
+                               /*
+                                * we didn't find an existing cluster to
+                                * merge into, but there's room to start
                                 * a new one
                                 */
-                               must_push = 1;
-                       }
-check_delay:
-                       if (must_push)
-                               cluster_push(vp);
+                               goto start_new_cluster;
 
-                       if (io_size_before_rounding < (MAX_UPL_TRANSFER * PAGE_SIZE) && !(flags & IO_SYNC)) {
-                               vp->v_clen = MAX_UPL_TRANSFER;
+                       /*
+                        * no existing cluster to merge with and no
+                        * room to start a new one... we'll try
+                        * pushing the existing ones... if none of
+                        * them are able to be pushed, we'll have
+                        * to fall back on the VHASDIRTY mechanism...
+                        * cluster_try_push will set v_clen to the
+                        * number of remaining clusters if it is
+                        * unable to push all of them
+                        */
+                       if (vp->v_flag & VNOCACHE_DATA)
+                               can_delay = 0;
+                       else
+                               can_delay = 1;
+
+                       if (cluster_try_push(vp, newEOF, 0, 0) == 0) {
+                               vp->v_flag |= VHASDIRTY;
+                               goto delay_io;
+                       }
+start_new_cluster:
+                       if (vp->v_clen == 0) {
+                               vp->v_ciosiz = devblocksize;
                                vp->v_cstart = start_blkno;
                                vp->v_lastw  = last_blkno;
-                               vp->v_ciosiz = io_size;
-                               
-                               can_delay = 1;
                        }
-finish_io:
-                       if (can_delay) {
-                               ubc_upl_commit_range(upl, 0, upl_size,
-                                                    UPL_COMMIT_SET_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
-                               continue;
+                       vp->v_clusters[vp->v_clen].start_pg = start_blkno;
+                       vp->v_clusters[vp->v_clen].last_pg  = last_blkno;
+                       vp->v_clen++;
+delay_io:
+                       /*
+                        * make sure we keep v_cstart and v_lastw up to 
+                        * date in case we have to fall back on the
+                        * VHASDIRTY mechanism (or we've already entered it)
+                        */
+                       if (start_blkno < vp->v_cstart)
+                               vp->v_cstart = start_blkno;
+                       if (last_blkno > vp->v_lastw)
+                               vp->v_lastw = last_blkno;
+
+                       ubc_upl_commit_range(upl, 0, upl_size, UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
+                       continue;
+issue_io:
+                       /*
+                        * in order to maintain some semblance of coherency with mapped writes
+                        * we need to write the cluster back out as a multiple of the PAGESIZE
+                        * unless the cluster encompasses the last page of the file... in this
+                        * case we'll round out to the nearest device block boundary
+                        */
+                       io_size = upl_size;
+
+                       if ((upl_f_offset + io_size) > newEOF) {
+                               io_size = newEOF - upl_f_offset;
+                               io_size = (io_size + (devblocksize - 1)) & ~(devblocksize - 1);
                        }
+
                        if (flags & IO_SYNC)
                                io_flags = CL_COMMIT | CL_AGE;
                        else
@@ -1905,8 +2022,8 @@ finish_io:
                                vp->v_flag |= VTHROTTLED;
                                tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "cluster_write", 0);
                        }       
-                       retval = cluster_io(vp, upl, 0, upl_f_offset, io_size,
-                                           io_flags, (struct buf *)0);
+                       retval = cluster_io(vp, upl, 0, upl_f_offset, io_size, devblocksize,
+                                           io_flags, (struct buf *)0, (struct clios *)0);
                }
        }
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
@@ -1915,6 +2032,7 @@ finish_io:
        return (retval);
 }
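
The cluster bookkeeping above treats each v_clusters[] entry as a
half-open interval of dirty pages, [start_pg, last_pg), that may never
span more than MAX_UPL_TRANSFER pages. The merge test can be read in
isolation as follows (struct pgcluster and try_merge are illustrative
stand-ins, not kernel declarations; a return of 1 means the write was
absorbed, 0 means the cluster was at most clipped):

        struct pgcluster {
                daddr_t start_pg;   /* first dirty page in the cluster */
                daddr_t last_pg;    /* page just past the last dirty page */
        };

        static int
        try_merge(struct pgcluster *cl, daddr_t start_blkno, daddr_t last_blkno)
        {
                if (start_blkno >= cl->start_pg) {
                        /* write starts at or after the cluster */
                        if (last_blkno <= cl->start_pg + MAX_UPL_TRANSFER) {
                                if (last_blkno > cl->last_pg)
                                        cl->last_pg = last_blkno;   /* grow the tail */
                                return (1);
                        }
                        if (start_blkno < cl->start_pg + MAX_UPL_TRANSFER &&
                            cl->last_pg > start_blkno)
                                cl->last_pg = start_blkno;          /* clip the tail */
                        return (0);
                }
                /* write starts in front of the cluster */
                if (cl->last_pg - start_blkno <= MAX_UPL_TRANSFER) {
                        cl->start_pg = start_blkno;                 /* grow the head */
                        if (last_blkno > cl->last_pg)
                                cl->last_pg = last_blkno;
                        return (1);
                }
                if (last_blkno > cl->start_pg)
                        cl->start_pg = last_blkno;                  /* clip the head */
                return (0);
        }

cluster_write_x walks every entry with this test before deciding to
start a new cluster, push the existing ones, or fall back to VHASDIRTY.
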
 
+int
 cluster_read(vp, uio, filesize, devblocksize, flags)
        struct vnode *vp;
        struct uio   *uio;
@@ -1981,7 +2099,7 @@ cluster_read(vp, uio, filesize, devblocksize, flags)
 
            if (upl_flags & UPL_PHYS_CONTIG)
              {
-               retval = cluster_phys_read(vp, uio, filesize);
+               retval = cluster_phys_read(vp, uio, filesize, devblocksize, flags);
              }
            else if (uio->uio_resid < 4 * PAGE_SIZE)
              {
@@ -2061,7 +2179,8 @@ cluster_read(vp, uio, filesize, devblocksize, flags)
        return(retval);
 }
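
When cluster_read() takes the direct-I/O path (the VNOCACHE_DATA and
UIO_USERSPACE guards sit outside this hunk), the visible branches
amount to a three-way dispatch; a condensed sketch, with the targets of
the last two arms inferred from the prototypes rather than quoted:

        /* hypothetical condensation of cluster_read's dispatch */
        if (upl_flags & UPL_PHYS_CONTIG)
                /* user buffer maps physically contiguous memory */
                retval = cluster_phys_read(vp, uio, filesize, devblocksize, flags);
        else if (uio->uio_resid < 4 * PAGE_SIZE)
                /* small request... stage it through the page cache */
                retval = cluster_read_x(vp, uio, filesize, devblocksize, flags);
        else
                /* large uncached request... read straight into user pages */
                retval = cluster_nocopy_read(vp, uio, filesize, devblocksize, flags);
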
 
-static
+
+static int
 cluster_read_x(vp, uio, filesize, devblocksize, flags)
        struct vnode *vp;
        struct uio   *uio;
@@ -2108,7 +2227,7 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                        io_size = uio->uio_resid;
                else
                        io_size = max_size;
-#ifdef ppc
+
                if (uio->uio_segflg == UIO_USERSPACE && !(vp->v_flag & VNOCACHE_DATA)) {
                        segflg = uio->uio_segflg;
 
@@ -2118,8 +2237,8 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                                     (int)uio->uio_offset, io_size, uio->uio_resid, 0, 0);
 
                        while (io_size && retval == 0) {
-                               int         xsize;
-                               vm_offset_t paddr;
+                           int         xsize;
+                               int     xsize;
+                               ppnum_t paddr;
                                if (ubc_page_op(vp,
                                                upl_f_offset,
@@ -2132,7 +2251,7 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                                if (xsize > io_size)
                                        xsize = io_size;
 
-                               retval = uiomove((caddr_t)(paddr + start_offset), xsize, uio);
+                               retval = uiomove64((addr64_t)(((addr64_t)paddr << 12) + start_offset), xsize, uio);
 
                                ubc_page_op(vp, upl_f_offset,
                                            UPL_POP_CLR | UPL_POP_BUSY, 0, 0);
@@ -2170,14 +2289,13 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                        }
                        max_size = filesize - uio->uio_offset;
                }
-#endif
                upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
                if (upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
                        upl_size = MAX_UPL_TRANSFER * PAGE_SIZE;
                pages_in_upl = upl_size / PAGE_SIZE;
 
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START,
-                            upl, (int)upl_f_offset, upl_size, start_offset, 0);
+                            (int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
 
                kret = ubc_create_upl(vp, 
                                                upl_f_offset,
@@ -2189,7 +2307,7 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                        panic("cluster_read: failed to get pagelist");
 
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_END,
-                            upl, (int)upl_f_offset, upl_size, start_offset, 0);
+                            (int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
 
                /*
                 * scan from the beginning of the upl looking for the first
@@ -2223,16 +2341,15 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                        upl_offset = start_pg * PAGE_SIZE;
                        io_size    = (last_pg - start_pg) * PAGE_SIZE;
 
-                       if ((upl_f_offset + upl_offset + io_size) > filesize) {
+                       if ((upl_f_offset + upl_offset + io_size) > filesize)
                                io_size = filesize - (upl_f_offset + upl_offset);
-                               io_size = (io_size + (devblocksize - 1)) & ~(devblocksize - 1);
-                       }
+
                        /*
                         * issue a synchronous read to cluster_io
                         */
 
                        error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset,
-                                          io_size, CL_READ, (struct buf *)0);
+                                          io_size, devblocksize, CL_READ, (struct buf *)0, (struct clios *)0);
                }
                if (error == 0) {
                        /*
@@ -2279,7 +2396,6 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                                        cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize);
                                vp->v_lastr = e_lblkno;
                        }
-#ifdef ppc
                        if (uio->uio_segflg == UIO_USERSPACE) {
                                int       offset;
 
@@ -2296,14 +2412,14 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                                while (val_size && retval == 0) {
                                        int       csize;
                                        int       i;
-                                       caddr_t   paddr;
+                                       addr64_t        paddr;
 
                                        i = offset / PAGE_SIZE;
                                        csize = min(PAGE_SIZE - start_offset, val_size);
 
-                                       paddr = (caddr_t)upl_phys_page(pl, i) + start_offset;
+                                       paddr = ((addr64_t)upl_phys_page(pl, i) << 12) + start_offset;
 
-                                       retval = uiomove(paddr, csize, uio);
+                                       retval = uiomove64(paddr, csize, uio);
 
                                        val_size    -= csize;
                                        offset      += csize;
@@ -2313,8 +2429,8 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                                             (int)uio->uio_offset, val_size, uio->uio_resid, 0, 0);
 
                                uio->uio_segflg = segflg;
-                       } else
-#endif
+                       }
+                       else
                        {
                                if ((kret = ubc_upl_map(upl, &io_address)) != KERN_SUCCESS)
                                        panic("cluster_read: ubc_upl_map() failed\n");
@@ -2334,7 +2450,7 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                        io_size = (last_pg - start_pg) * PAGE_SIZE;
 
                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START,
-                                    upl, start_pg * PAGE_SIZE, io_size, error, 0);
+                                    (int)upl, start_pg * PAGE_SIZE, io_size, error, 0);
 
                        if (error || (vp->v_flag & VNOCACHE_DATA))
                                ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
@@ -2346,7 +2462,7 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                                                | UPL_COMMIT_INACTIVATE);
 
                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END,
-                                    upl, start_pg * PAGE_SIZE, io_size, error, 0);
+                                    (int)upl, start_pg * PAGE_SIZE, io_size, error, 0);
                }
                if ((last_pg - start_pg) < pages_in_upl) {
                        int cur_pg;
@@ -2358,10 +2474,10 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                         * their state
                         */
                        if (error)
-                               ubc_upl_abort(upl, 0);
+                               ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
                        else {
                                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START,
-                                            upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0);
+                                            (int)upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0);
 
                                if (start_pg) {
                                        /*
@@ -2411,11 +2527,12 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                                         * that we didn't issue an I/O for, just release them
                                         * unchanged
                                         */
-                                       ubc_upl_abort(upl, 0);
+                                       ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
+                                                           (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
                                }
 
                                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END,
-                                       upl, -1, -1, 0, 0);
+                                       (int)upl, -1, -1, 0, 0);
                        }
                }
                if (retval == 0)
@@ -2425,7 +2542,8 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
        return (retval);
 }
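
Every uiomove64() conversion above follows the same recipe: a ppnum_t
physical page number shifted left by 12 (i.e. assuming 4 KB pages, as
the explicit shift in this file implies) plus the byte offset within
the page. As a one-line hypothetical helper:

        /* hypothetical: the address construction used for uiomove64() */
        static addr64_t
        phys_addr_64(ppnum_t pagenum, int byte_offset)
        {
                /* '<< 12' matches the shifts in this file; assumes 4K pages */
                return (((addr64_t)pagenum << 12) + byte_offset);
        }

so the in-cache copy loop above is equivalent to
uiomove64(phys_addr_64(upl_phys_page(pl, i), start_offset), csize, uio).
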
 
-static
+
+static int
 cluster_nocopy_read(vp, uio, filesize, devblocksize, flags)
        struct vnode *vp;
        struct uio   *uio;
@@ -2443,15 +2561,16 @@ cluster_nocopy_read(vp, uio, filesize, devblocksize, flags)
        int              upl_size;
        int              upl_needed_size;
        int              pages_in_pl;
-       vm_offset_t      paddr;
+       ppnum_t              paddr;
        int              upl_flags;
        kern_return_t    kret;
        int              segflg;
        struct iovec     *iov;
        int              i;
        int              force_data_sync;
-       int              error  = 0;
        int              retval = 0;
+       int              first = 1;
+       struct clios     iostate;
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START,
                     (int)uio->uio_offset, uio->uio_resid, (int)filesize, devblocksize, 0);
@@ -2463,203 +2582,225 @@ cluster_nocopy_read(vp, uio, filesize, devblocksize, flags)
         *  -- the resid will not exceed iov_len
         */
 
+       iostate.io_completed = 0;
+       iostate.io_issued = 0;
+       iostate.io_error = 0;
+       iostate.io_wanted = 0;
+
        iov = uio->uio_iov;
+
        while (uio->uio_resid && uio->uio_offset < filesize && retval == 0) {
 
-         max_io_size = filesize - uio->uio_offset;
+               max_io_size = filesize - uio->uio_offset;
 
-         if (max_io_size < (off_t)((unsigned int)uio->uio_resid))
-             io_size = max_io_size;
-         else
-             io_size = uio->uio_resid;
+               if (max_io_size < (off_t)((unsigned int)uio->uio_resid))
+                       io_size = max_io_size;
+               else
+                       io_size = uio->uio_resid;
 
-         /*
-          * We don't come into this routine unless
-          * UIO_USERSPACE is set.
-          */
-         segflg = uio->uio_segflg;
+               /*
+                * We don't come into this routine unless
+                * UIO_USERSPACE is set.
+                */
+               segflg = uio->uio_segflg;
 
-         uio->uio_segflg = UIO_PHYS_USERSPACE;
+               uio->uio_segflg = UIO_PHYS_USERSPACE;
 
-         /*
-          * First look for pages already in the cache
-          * and move them to user space.
-          */
-         while (io_size && (retval == 0)) {
-           upl_f_offset = uio->uio_offset;
+               /*
+                * First look for pages already in the cache
+                * and move them to user space.
+                */
+               while (io_size && (retval == 0)) {
+                       upl_f_offset = uio->uio_offset;
 
-           /*
-            * If this call fails, it means the page is not
-            * in the page cache.
-            */
-           if (ubc_page_op(vp, upl_f_offset,
-                           UPL_POP_SET | UPL_POP_BUSY, &paddr, 0) != KERN_SUCCESS)
-             break;
+                       /*
+                        * If this call fails, it means the page is not
+                        * in the page cache.
+                        */
+                       if (ubc_page_op(vp, upl_f_offset,
+                                       UPL_POP_SET | UPL_POP_BUSY, &paddr, 0) != KERN_SUCCESS)
+                               break;
 
-           retval = uiomove((caddr_t)(paddr), PAGE_SIZE, uio);
+                       retval = uiomove64((addr64_t)paddr << 12, PAGE_SIZE, uio);
                                
-           ubc_page_op(vp, upl_f_offset, 
-                       UPL_POP_CLR | UPL_POP_BUSY, 0, 0);
+                       ubc_page_op(vp, upl_f_offset, 
+                                   UPL_POP_CLR | UPL_POP_BUSY, 0, 0);
                  
-           io_size     -= PAGE_SIZE;
-           KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 71)) | DBG_FUNC_NONE,
-                          (int)uio->uio_offset, io_size, uio->uio_resid, 0, 0);
-         }
-
-         uio->uio_segflg = segflg;
+                       io_size -= PAGE_SIZE;
+                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 71)) | DBG_FUNC_NONE,
+                                    (int)uio->uio_offset, io_size, uio->uio_resid, 0, 0);
+               }
+               uio->uio_segflg = segflg;
                        
-         if (retval)
-           {
-             KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
-                          (int)uio->uio_offset, uio->uio_resid, 2, retval, 0);       
-             return(retval);
-           }
-
-         /* If we are already finished with this read, then return */
-         if (io_size == 0)
-           {
-
-             KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
-                          (int)uio->uio_offset, uio->uio_resid, 3, io_size, 0);
-             return(0);
-           }
-
-         max_io_size = io_size;
-         if (max_io_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
-           max_io_size = MAX_UPL_TRANSFER * PAGE_SIZE;
-
-         start_upl_f_offset = uio->uio_offset;   /* this is page aligned in the file */
-         upl_f_offset = start_upl_f_offset;
-         io_size = 0;
-
-         while(io_size < max_io_size)
-           {
-
-             if(ubc_page_op(vp, upl_f_offset,
-                               UPL_POP_SET | UPL_POP_BUSY, &paddr, 0) == KERN_SUCCESS)
-             {
-                       ubc_page_op(vp, upl_f_offset,
-                           UPL_POP_CLR | UPL_POP_BUSY, 0, 0);
-                       break;
-             }
-
-                 /*
-                  * Build up the io request parameters.
-                  */
-
-                 io_size += PAGE_SIZE;
-                 upl_f_offset += PAGE_SIZE;
+               if (retval) {
+                       /*
+                        * we may have already spun some portion of this request
+                        * off as async requests... we need to wait for the I/O
+                        * to complete before returning
+                        */
+                       goto wait_for_reads;
                }
+               /*
+                * If we are already finished with this read, then return
+                */
+               if (io_size == 0) {
+                       /*
+                        * we may have already spun some portion of this request
+                        * off as async requests... we need to wait for the I/O
+                        * to complete before returning
+                        */
+                       goto wait_for_reads;
+               }
+               max_io_size = io_size;
+
+               if (max_io_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
+                       max_io_size = MAX_UPL_TRANSFER * PAGE_SIZE;
+               if (first) {
+                       if (max_io_size > (MAX_UPL_TRANSFER * PAGE_SIZE) / 4)
+                               max_io_size = (MAX_UPL_TRANSFER * PAGE_SIZE) / 8;
+                       first = 0;
+               }
+               start_upl_f_offset = uio->uio_offset;   /* this is page aligned in the file */
+               upl_f_offset = start_upl_f_offset;
+               io_size = 0;
+
+               while (io_size < max_io_size) {
+                       if (ubc_page_op(vp, upl_f_offset,
+                                       UPL_POP_SET | UPL_POP_BUSY, &paddr, 0) == KERN_SUCCESS) {
+                               ubc_page_op(vp, upl_f_offset,
+                                           UPL_POP_CLR | UPL_POP_BUSY, 0, 0);
+                               break;
+                       }
+                       /*
+                        * Build up the io request parameters.
+                        */
+                       io_size += PAGE_SIZE_64;
+                       upl_f_offset += PAGE_SIZE_64;
+               }
+               if (io_size == 0)
+                       /*
+                        * we may have already spun some portion of this request
+                        * off as async requests... we need to wait for the I/O
+                        * to complete before returning
+                        */
+                       goto wait_for_reads;
 
-             if (io_size == 0)
-               return(retval);
-
-         upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK_64;
-         upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
+               upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK_64;
+               upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
 
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
-                      (int)upl_offset, upl_needed_size, iov->iov_base, io_size, 0);
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
+                            (int)upl_offset, upl_needed_size, (int)iov->iov_base, io_size, 0);
 
-         for (force_data_sync = 0; force_data_sync < 3; force_data_sync++)
-           {
-             pages_in_pl = 0;
-             upl_size = upl_needed_size;
-             upl_flags = UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL;
+               for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
+                       pages_in_pl = 0;
+                       upl_size = upl_needed_size;
+                       upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL;
 
-             kret = vm_map_get_upl(current_map(),
-                                   (vm_offset_t)iov->iov_base & ~PAGE_MASK,
-                                   &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, force_data_sync);
+                       kret = vm_map_get_upl(current_map(),
+                                             (vm_offset_t)iov->iov_base & ~PAGE_MASK,
+                                             &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, force_data_sync);
 
-             if (kret != KERN_SUCCESS)
-               {
-                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
-                              (int)upl_offset, upl_size, io_size, kret, 0);
+                       if (kret != KERN_SUCCESS) {
+                               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
+                                            (int)upl_offset, upl_size, io_size, kret, 0);
                  
-                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
-                              (int)uio->uio_offset, uio->uio_resid, 4, retval, 0);
-
-                 /* cluster_nocopy_read: failed to get pagelist */
-                 /* do not return kret here */
-                 return(retval);
-               }
+                               /*
+                                * cluster_nocopy_read: failed to get pagelist
+                                *
+                                * we may have already spun some portion of this request
+                                * off as async requests... we need to wait for the I/O
+                                * to complete before returning
+                                */
+                               goto wait_for_reads;
+                       }
+                       pages_in_pl = upl_size / PAGE_SIZE;
+                       pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
 
-             pages_in_pl = upl_size / PAGE_SIZE;
-             pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
+                       for (i = 0; i < pages_in_pl; i++) {
+                               if (!upl_valid_page(pl, i))
+                                       break;            
+                       }
+                       if (i == pages_in_pl)
+                               break;
 
-             for(i=0; i < pages_in_pl; i++)
-               {
-                 if (!upl_valid_page(pl, i))
-                   break;                
+                       ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
+                                           UPL_ABORT_FREE_ON_EMPTY);
                }
-             if (i == pages_in_pl)
-               break;
-
-             ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
-                                 UPL_ABORT_FREE_ON_EMPTY);
-           }
-
-         if (force_data_sync >= 3)
-           {
-                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
-                              (int)upl_offset, upl_size, io_size, kret, 0);
+               if (force_data_sync >= 3) {
+                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
+                                    (int)upl_offset, upl_size, io_size, kret, 0);
                  
-                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
-                              (int)uio->uio_offset, uio->uio_resid, 5, retval, 0);
-             return(retval);
-           }
-         /*
-          * Consider the possibility that upl_size wasn't satisfied.
-          */
-         if (upl_size != upl_needed_size)
-           io_size = (upl_size - (int)upl_offset) & ~PAGE_MASK;
-
-         if (io_size == 0)
-           {
-             ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
-                                  UPL_ABORT_FREE_ON_EMPTY);
-             return(retval);
-           }
-
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
-                      (int)upl_offset, upl_size, io_size, kret, 0);
-
-         /*
-          * issue a synchronous read to cluster_io
-          */
+                       goto wait_for_reads;
+               }
+               /*
+                * Consider the possibility that upl_size wasn't satisfied.
+                */
+               if (upl_size != upl_needed_size)
+                       io_size = (upl_size - (int)upl_offset) & ~PAGE_MASK;
 
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START,
-                      upl, (int)upl_offset, (int)start_upl_f_offset, io_size, 0);
+               if (io_size == 0) {
+                       ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
+                                           UPL_ABORT_FREE_ON_EMPTY);
+                       goto wait_for_reads;
+               }
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
+                            (int)upl_offset, upl_size, io_size, kret, 0);
 
-         error = cluster_io(vp, upl, upl_offset, start_upl_f_offset,
-                            io_size, CL_READ| CL_NOZERO, (struct buf *)0);
+               /*
+                * request asynchronously so that we can overlap
+                * the preparation of the next I/O...
+                * if there are already too many outstanding reads,
+                * wait until some have completed before issuing the next read
+                */
+               while ((iostate.io_issued - iostate.io_completed) > (2 * MAX_UPL_TRANSFER * PAGE_SIZE)) {
+                       iostate.io_wanted = 1;
+                       tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "cluster_nocopy_read", 0);
+               }       
+               if (iostate.io_error) {
+                       /*
+                        * one of the earlier reads we issued ran into a hard error...
+                        * don't issue any more reads; clean up the UPL
+                        * that was just created but not used, then
+                        * go wait for any other reads to complete before
+                        * returning the error to the caller
+                        */
+                       ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
+                                           UPL_ABORT_FREE_ON_EMPTY);
 
-         if (error == 0) {
-           /*
-            * The cluster_io read completed successfully,
-            * update the uio structure and commit.
-            */
+                       goto wait_for_reads;
+               }
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START,
+                            (int)upl, (int)upl_offset, (int)start_upl_f_offset, io_size, 0);
 
-           ubc_upl_commit_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
-                                       UPL_COMMIT_SET_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
-           
-           iov->iov_base += io_size;
-           iov->iov_len -= io_size;
-           uio->uio_resid -= io_size;
-           uio->uio_offset += io_size;
-         }
-         else {
-           ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
-                                  UPL_ABORT_FREE_ON_EMPTY);
-         }
+               retval = cluster_io(vp, upl, upl_offset, start_upl_f_offset,
+                                  io_size, devblocksize,
+                                  CL_PRESERVE | CL_COMMIT | CL_READ | CL_ASYNC | CL_NOZERO,
+                                  (struct buf *)0, &iostate);
 
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END,
-                      upl, (int)uio->uio_offset, (int)uio->uio_resid, error, 0);
+               /*
+                * update the uio structure
+                */
+               iov->iov_base   += io_size;
+               iov->iov_len    -= io_size;
+               uio->uio_resid  -= io_size;
+               uio->uio_offset += io_size;
 
-         if (retval == 0)
-           retval = error;
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END,
+                            (int)upl, (int)uio->uio_offset, (int)uio->uio_resid, retval, 0);
 
        } /* end while */
 
+wait_for_reads:
+       /*
+        * make sure all async reads that are part of this stream
+        * have completed before we return
+        */
+       while (iostate.io_issued != iostate.io_completed) {
+               iostate.io_wanted = 1;
+               tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "cluster_nocopy_read", 0);
+       }       
+       if (iostate.io_error)
+               retval = iostate.io_error;
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
                     (int)uio->uio_offset, (int)uio->uio_resid, 6, retval, 0);
@@ -2668,22 +2809,28 @@ cluster_nocopy_read(vp, uio, filesize, devblocksize, flags)
 }
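
The wait_for_reads drain above is one half of a producer/consumer handshake over the struct clios counters: the issuing thread sleeps on io_wanted whenever io_issued runs too far ahead of io_completed (or when draining before return), and the completion path is expected to advance io_completed and wake any sleeper. A minimal sketch of that completion side, as a hedged illustration only; sketch_read_complete and its parameters are assumptions, not the actual xnu callback:

        /*
         * Hedged sketch -- not the actual xnu completion path.  Shows how a
         * completing read would pair with the tsleep() loops above.
         */
        static void
        sketch_read_complete(struct clios *iostate, u_int bytes_done, int error)
        {
                iostate->io_completed += bytes_done;

                if (error && iostate->io_error == 0)
                        iostate->io_error = error;   /* remember only the first hard error */

                if (iostate->io_wanted) {
                        /*
                         * an issuer is throttled in tsleep() on &iostate->io_wanted...
                         * clear the flag and wake it so it can re-check the counters
                         */
                        iostate->io_wanted = 0;
                        wakeup((caddr_t)&iostate->io_wanted);
                }
        }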
 
 
-static
-cluster_phys_read(vp, uio, filesize)
+static int
+cluster_phys_read(vp, uio, filesize, devblocksize, flags)
        struct vnode *vp;
        struct uio   *uio;
        off_t        filesize;
+       int          devblocksize;
+       int          flags;
 {
+       upl_page_info_t *pl;
        upl_t            upl;
        vm_offset_t      upl_offset;
+       addr64_t             dst_paddr;
        off_t            max_size;
        int              io_size;
+       int              tail_size;
        int              upl_size;
        int              upl_needed_size;
        int              pages_in_pl;
        int              upl_flags;
        kern_return_t    kret;
        struct iovec     *iov;
+       struct clios     iostate;
        int              error;
 
        /*
@@ -2696,68 +2843,134 @@ cluster_phys_read(vp, uio, filesize)
 
        max_size = filesize - uio->uio_offset;
 
-       if (max_size < (off_t)((unsigned int)iov->iov_len))
-           io_size = max_size;
+       if (max_size > (off_t)((unsigned int)iov->iov_len))
+               io_size = iov->iov_len;
        else
-           io_size = iov->iov_len;
+               io_size = max_size;
 
        upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK_64;
        upl_needed_size = upl_offset + io_size;
 
+       error       = 0;
        pages_in_pl = 0;
        upl_size = upl_needed_size;
-       upl_flags = UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL;
+       upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL;
 
        kret = vm_map_get_upl(current_map(),
                              (vm_offset_t)iov->iov_base & ~PAGE_MASK,
                              &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, 0);
 
-       if (kret != KERN_SUCCESS)
-         {
-           /* cluster_phys_read: failed to get pagelist */
-           return(EINVAL);
-         }
+       if (kret != KERN_SUCCESS) {
+               /*
+                * cluster_phys_read: failed to get pagelist
+                */
+               return(EINVAL);
+       }
+       if (upl_size < upl_needed_size) {
+               /*
+                * The upl_size wasn't satisfied.
+                */
+               ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
+
+               return(EINVAL);
+       }
+       pl = ubc_upl_pageinfo(upl);
+
+       dst_paddr = (((addr64_t)(int)upl_phys_page(pl, 0)) << 12) + ((addr64_t)iov->iov_base & PAGE_MASK);
+
+       while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
+               int   head_size;
+
+               head_size = devblocksize - (int)(uio->uio_offset & (devblocksize - 1));
+
+               if (head_size > io_size)
+                       head_size = io_size;
+
+               error = cluster_align_phys_io(vp, uio, dst_paddr, head_size, devblocksize, CL_READ);
+
+               if (error) {
+                       ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
+
+                       return(EINVAL);
+               }
+               upl_offset += head_size;
+               dst_paddr  += head_size;
+               io_size    -= head_size;
+       }
+       tail_size = io_size & (devblocksize - 1);
+       io_size  -= tail_size;
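
To make the head/body/tail carving concrete with illustrative numbers (devblocksize = 512, uio_offset = 700, io_size = 1300 are made up, not from the source): the loop above peels off a 324-byte head through cluster_align_phys_io, the 512-byte device-block-aligned body goes through the CL_DEV_MEMORY path below, and the 464-byte tail is handled by cluster_align_phys_io once the async reads have drained:

        /* hedged arithmetic sketch only; all values are illustrative */
        int head_size = 512 - (700 & (512 - 1));   /* 324 bytes, unaligned head      */
        int remain    = 1300 - head_size;          /* 976 bytes still to read        */
        int tail_size = remain & (512 - 1);        /* 464 bytes, unaligned tail      */
        int body_size = remain - tail_size;        /* 512 bytes, whole device blocks */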
+
+       iostate.io_completed = 0;
+       iostate.io_issued = 0;
+       iostate.io_error = 0;
+       iostate.io_wanted = 0;
+
+       while (io_size && error == 0) {
+               int  xsize;
 
+               if (io_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
+                       xsize = MAX_UPL_TRANSFER * PAGE_SIZE;
+               else
+                       xsize = io_size;
+               /*
+                * request asynchronously so that we can overlap
+                * the preparation of the next I/O... we'll do
+                * the commit after all the I/O has completed,
+                * since it's all issued against the same UPL...
+                * if there are already too many outstanding reads,
+                * wait until some have completed before issuing the next
+                */
+               while ((iostate.io_issued - iostate.io_completed) > (2 * MAX_UPL_TRANSFER * PAGE_SIZE)) {
+                       iostate.io_wanted = 1;
+                       tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "cluster_phys_read", 0);
+               }       
+
+               error = cluster_io(vp, upl, upl_offset, uio->uio_offset, xsize, 0, 
+                                  CL_READ | CL_NOZERO | CL_DEV_MEMORY | CL_ASYNC,
+                                  (struct buf *)0, &iostate);
+               /*
+                * The cluster_io read was issued successfully;
+                * update the uio structure
+                */
+               if (error == 0) {
+                       uio->uio_resid  -= xsize;
+                       iov->iov_len    -= xsize;
+                       iov->iov_base   += xsize;
+                       uio->uio_offset += xsize;
+                       dst_paddr       += xsize;
+                       upl_offset      += xsize;
+                       io_size         -= xsize;
+               }
+       }
        /*
-        * Consider the possibility that upl_size wasn't satisfied.
+        * make sure all async reads that are part of this stream
+        * have completed before we proceed
         */
-       if (upl_size < upl_needed_size)
-         {
-           ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
-           return(EINVAL);
-         }
+       while (iostate.io_issued != iostate.io_completed) {
+               iostate.io_wanted = 1;
+               tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "cluster_phys_read", 0);
+       }       
+       if (iostate.io_error) {
+               error = iostate.io_error;
+       }
+       if (error == 0 && tail_size)
+               error = cluster_align_phys_io(vp, uio, dst_paddr, tail_size, devblocksize, CL_READ);
 
        /*
-        * issue a synchronous read to cluster_io
+        * just release our hold on the physically contiguous
+        * region without changing any state
         */
-
-       error = cluster_io(vp, upl, upl_offset, uio->uio_offset,
-                          io_size,  CL_READ| CL_NOZERO | CL_DEV_MEMORY, (struct buf *)0);
-
-       if (error == 0)
-         {
-           /*
-            * The cluster_io read completed successfully,
-            * update the uio structure and commit.
-            */
-
-           ubc_upl_commit_range(upl, 0, upl_size, UPL_COMMIT_FREE_ON_EMPTY);
-           
-           iov->iov_base += io_size;
-           iov->iov_len -= io_size;
-           uio->uio_resid -= io_size;
-           uio->uio_offset += io_size;
-         }
-       else
-           ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
+       ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
        
        return (error);
 }
 
+
 /*
  * generate advisory I/O's in the largest chunks possible
  * the completed pages will be released into the VM cache
  */
+int
 advisory_read(vp, filesize, f_offset, resid, devblocksize)
        struct vnode *vp;
        off_t         filesize;
@@ -2778,7 +2991,7 @@ advisory_read(vp, filesize, f_offset, resid, devblocksize)
        int              io_size;
        kern_return_t    kret;
        int              retval = 0;
-
+       int              issued_io;
 
        if (!UBCINFOEXISTS(vp))
                return(EINVAL);
@@ -2814,88 +3027,83 @@ advisory_read(vp, filesize, f_offset, resid, devblocksize)
                                                upl_size,
                                                &upl,
                                                &pl,
-                                               UPL_FLAGS_NONE);
+                                               UPL_RET_ONLY_ABSENT);
                if (kret != KERN_SUCCESS)
-                       panic("advisory_read: failed to get pagelist");
-
-
-               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_NONE,
-                            upl, (int)upl_f_offset, upl_size, start_offset, 0);
+                       return(retval);
+               issued_io = 0;
 
                /*
-                * scan from the beginning of the upl looking for the first
-                * non-valid page.... this will become the first page in
-                * the request we're going to make to 'cluster_io'... if all
-                * of the pages are valid, we won't call through to 'cluster_io'
+                * before we start marching forward, we must make sure we end on 
+                * a present page, otherwise we will be working with a freed
+                * upl
                 */
-               for (start_pg = 0; start_pg < pages_in_upl; start_pg++) {
-                       if (!upl_valid_page(pl, start_pg))
-                               break;
+               for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
+                       if (upl_page_present(pl, last_pg))
+                               break;
                }
+               pages_in_upl = last_pg + 1;
 
-               /*
-                * scan from the starting invalid page looking for a valid
-                * page before the end of the upl is reached, if we 
-                * find one, then it will be the last page of the request to
-                * 'cluster_io'
-                */
-               for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
-                       if (upl_valid_page(pl, last_pg))
-                               break;
-               }
 
-               if (start_pg < last_pg) {               
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_NONE,
+                            (int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
+
+
+               for (last_pg = 0; last_pg < pages_in_upl; ) {
                        /*
-                        * we found a range of 'invalid' pages that must be filled
-                        * if the last page in this range is the last page of the file
-                        * we may have to clip the size of it to keep from reading past
-                        * the end of the last physical block associated with the file
+                        * scan from the beginning of the upl looking for the first
+                        * page that is present.... this will become the first page in
+                        * the request we're going to make to 'cluster_io'... if all
+                        * of the pages are absent, we won't call through to 'cluster_io'
                         */
-                       upl_offset = start_pg * PAGE_SIZE;
-                       io_size    = (last_pg - start_pg) * PAGE_SIZE;
-
-                       if ((upl_f_offset + upl_offset + io_size) > filesize) {
-                               io_size = filesize - (upl_f_offset + upl_offset);
-                               io_size = (io_size + (devblocksize - 1)) & ~(devblocksize - 1);
+                       for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
+                               if (upl_page_present(pl, start_pg))
+                                       break;
                        }
-                       /*
-                        * issue an asynchronous read to cluster_io
-                        */
-                       retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
-                                         CL_ASYNC | CL_READ | CL_COMMIT | CL_AGE, (struct buf *)0);
-               }
-               if (start_pg) {
-                       /*
-                        * start_pg of non-zero indicates we found some already valid pages
-                        * at the beginning of the upl.... we need to release these without
-                        * modifying there state
-                        */
-                       ubc_upl_abort_range(upl, 0, start_pg * PAGE_SIZE,
-                                       UPL_ABORT_FREE_ON_EMPTY);
 
-                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 62)) | DBG_FUNC_NONE,
-                                   upl, 0, start_pg * PAGE_SIZE, 0, 0);
-               }
-               if (last_pg < pages_in_upl) {
                        /*
-                        * the set of pages that we issued an I/O for did not extend all the
-                        * way to the end of the upl..so just release them without modifying
-                        * there state
+                        * scan from the starting present page looking for an absent
+                        * page before the end of the upl is reached, if we 
+                        * find one, then it will terminate the range of pages being
+                        * presented to 'cluster_io'
                         */
-                       ubc_upl_abort_range(upl, last_pg * PAGE_SIZE, (pages_in_upl - last_pg) * PAGE_SIZE,
-                                       UPL_ABORT_FREE_ON_EMPTY);
+                       for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
+                               if (!upl_page_present(pl, last_pg))
+                                       break;
+                       }
+
+                       if (last_pg > start_pg) {               
+                               /*
+                                * we found a range of pages that must be filled...
+                                * if the last page in this range is the last page of the file,
+                                * we may have to clip the size of it to keep from reading past
+                                * the end of the last physical block associated with the file
+                                */
+                               upl_offset = start_pg * PAGE_SIZE;
+                               io_size    = (last_pg - start_pg) * PAGE_SIZE;
+
+                               if ((upl_f_offset + upl_offset + io_size) > filesize)
+                                       io_size = filesize - (upl_f_offset + upl_offset);
+
+                               /*
+                                * issue an asynchronous read to cluster_io
+                                */
+                               retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size, devblocksize,
+                                                   CL_ASYNC | CL_READ | CL_COMMIT | CL_AGE, (struct buf *)0, (struct clios *)0);
 
-                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 63)) | DBG_FUNC_NONE,
-                                    upl, last_pg * PAGE_SIZE,
-                                    (pages_in_upl - last_pg) * PAGE_SIZE, 0, 0);
+                               issued_io = 1;
+                       }
                }
-               io_size = (last_pg * PAGE_SIZE) - start_offset;
+               if (issued_io == 0)
+                       ubc_upl_abort(upl, 0);
+
+               io_size = upl_size - start_offset;
                
                if (io_size > resid)
                        io_size = resid;
                f_offset += io_size;
                resid    -= io_size;
        }
+
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_END,
                     (int)f_offset, resid, retval, 0, 0);
 
@@ -2903,8 +3111,172 @@ advisory_read(vp, filesize, f_offset, resid, devblocksize)
 }
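
The rewritten advisory_read above walks its UPL as a sequence of maximal runs of present pages, issuing one asynchronous cluster_io per run. The run-finding step, restated as a hedged standalone helper (find_present_run is illustrative; upl_page_present is the accessor the source itself uses):

        /*
         * Illustrative helper only: locate the next run [*start, *end) of
         * present pages in 'pl' at or after index 'from'; returns 0 when
         * no present page remains.
         */
        static int
        find_present_run(upl_page_info_t *pl, int pages_in_upl, int from,
                         int *start, int *end)
        {
                int pg;

                for (pg = from; pg < pages_in_upl; pg++)     /* skip absent pages */
                        if (upl_page_present(pl, pg))
                                break;
                if (pg >= pages_in_upl)
                        return (0);
                *start = pg;

                for ( ; pg < pages_in_upl; pg++)             /* extend across present pages */
                        if (!upl_page_present(pl, pg))
                                break;
                *end = pg;

                return (1);
        }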
 
 
+int
 cluster_push(vp)
         struct vnode *vp;
+{
+        int  retval;
+
+       if (!UBCINFOEXISTS(vp) || vp->v_clen == 0) {
+               vp->v_flag &= ~VHASDIRTY;
+               return(0);
+       }
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_START,
+                    vp->v_flag & VHASDIRTY, vp->v_clen, 0, 0, 0);
+
+       if (vp->v_flag & VHASDIRTY) {
+               daddr_t start_pg;
+               daddr_t last_pg;
+               daddr_t end_pg;
+
+               start_pg = vp->v_cstart;
+               end_pg   = vp->v_lastw;
+
+               vp->v_flag &= ~VHASDIRTY;
+               vp->v_clen = 0;
+
+               while (start_pg < end_pg) {
+                       last_pg = start_pg + MAX_UPL_TRANSFER;
+
+                       if (last_pg > end_pg)
+                               last_pg = end_pg;
+
+                       cluster_push_x(vp, ubc_getsize(vp), start_pg, last_pg, 0);
+
+                       start_pg = last_pg;
+               }
+               return (1);
+       }
+       retval = cluster_try_push(vp, ubc_getsize(vp), 0, 1);
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_END,
+                    vp->v_flag & VHASDIRTY, vp->v_clen, retval, 0, 0);
+
+       return (retval);
+}
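
A worked example for the VHASDIRTY sweep above, with illustrative numbers: if v_cstart = 0, v_lastw = 700 and MAX_UPL_TRANSFER = 256, cluster_push_x is called for the page ranges [0,256), [256,512) and [512,700), i.e. the dirty span is flushed in UPL-sized chunks with one short final chunk:

        /* hedged restatement of the sweep; 0, 700 and 256 are made-up values */
        daddr_t start_pg = 0, end_pg = 700, last_pg;

        while (start_pg < end_pg) {
                last_pg = start_pg + 256;          /* MAX_UPL_TRANSFER pages */
                if (last_pg > end_pg)
                        last_pg = end_pg;          /* clip the final chunk   */
                /* cluster_push_x(vp, EOF, start_pg, last_pg, 0); */
                start_pg = last_pg;
        }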
+
+
+static int
+cluster_try_push(vp, EOF, can_delay, push_all)
+        struct vnode *vp;
+       off_t  EOF;
+       int    can_delay;
+       int    push_all;
+{
+        int cl_index;
+       int cl_index1;
+       int min_index;
+        int cl_len;
+       int cl_total;
+       int cl_pushed;
+       struct v_cluster l_clusters[MAX_CLUSTERS];
+
+       /*
+        * make a local 'sorted' copy of the clusters
+        * and clear vp->v_clen so that new clusters can
+        * be developed
+        */
+       for (cl_index = 0; cl_index < vp->v_clen; cl_index++) {
+               for (min_index = -1, cl_index1 = 0; cl_index1 < vp->v_clen; cl_index1++) {
+                       if (vp->v_clusters[cl_index1].start_pg == vp->v_clusters[cl_index1].last_pg)
+                               continue;
+                       if (min_index == -1)
+                               min_index = cl_index1;
+                       else if (vp->v_clusters[cl_index1].start_pg < vp->v_clusters[min_index].start_pg)
+                               min_index = cl_index1;
+               }
+               if (min_index == -1)
+                       break;
+               l_clusters[cl_index].start_pg = vp->v_clusters[min_index].start_pg;
+               l_clusters[cl_index].last_pg  = vp->v_clusters[min_index].last_pg;
+
+               vp->v_clusters[min_index].start_pg = vp->v_clusters[min_index].last_pg;
+       }
+       cl_len     = cl_index;
+       vp->v_clen = 0;
+
+       for (cl_pushed = 0, cl_index = 0; cl_index < cl_len; cl_index++) {
+               /*
+                * try to push each cluster in turn...  cluster_push_x may not
+                * push the cluster if can_delay is TRUE and the cluster doesn't
+                * meet the criteria for an immediate push
+                */
+               if (cluster_push_x(vp, EOF, l_clusters[cl_index].start_pg, l_clusters[cl_index].last_pg, can_delay)) {
+                       l_clusters[cl_index].start_pg = 0;
+                       l_clusters[cl_index].last_pg  = 0;
+
+                       cl_pushed++;
+
+                       if (push_all == 0)
+                               break;
+               }
+       }
+       if (cl_len > cl_pushed) {
+              /*
+               * we didn't push all of the clusters, so
+               * let's try to merge them back into the vnode
+               */
+               if ((MAX_CLUSTERS - vp->v_clen) < (cl_len - cl_pushed)) {
+                       /*
+                        * we picked up some new clusters while we were trying to
+                        * push the old ones (I don't think this can happen because
+                        * I'm holding the lock, but just in case)... the sum of the
+                        * leftovers plus the new cluster count exceeds our ability
+                        * to represent them, so fall back to the VHASDIRTY mechanism
+                        */
+                       for (cl_index = 0; cl_index < cl_len; cl_index++) {
+                               if (l_clusters[cl_index].start_pg == l_clusters[cl_index].last_pg)
+                                       continue;
+
+                               if (l_clusters[cl_index].start_pg < vp->v_cstart)
+                                       vp->v_cstart = l_clusters[cl_index].start_pg;
+                               if (l_clusters[cl_index].last_pg > vp->v_lastw)
+                                       vp->v_lastw = l_clusters[cl_index].last_pg;
+                       }
+                       vp->v_flag |= VHASDIRTY;
+               } else {
+                       /*
+                        * we've got room to merge the leftovers back in...
+                        * just append them starting at the next 'hole'
+                        * represented by vp->v_clen
+                        */
+                       for (cl_index = 0, cl_index1 = vp->v_clen; cl_index < cl_len; cl_index++) {
+                               if (l_clusters[cl_index].start_pg == l_clusters[cl_index].last_pg)
+                                       continue;
+
+                               vp->v_clusters[cl_index1].start_pg = l_clusters[cl_index].start_pg;
+                               vp->v_clusters[cl_index1].last_pg  = l_clusters[cl_index].last_pg;
+
+                               if (cl_index1 == 0) {
+                                       vp->v_cstart = l_clusters[cl_index].start_pg;
+                                       vp->v_lastw  = l_clusters[cl_index].last_pg;
+                               } else {
+                                       if (l_clusters[cl_index].start_pg < vp->v_cstart)
+                                               vp->v_cstart = l_clusters[cl_index].start_pg;
+                                       if (l_clusters[cl_index].last_pg > vp->v_lastw)
+                                               vp->v_lastw = l_clusters[cl_index].last_pg;
+                               }
+                               cl_index1++;
+                       }
+                       /*
+                        * update the cluster count
+                        */
+                       vp->v_clen = cl_index1;
+               }
+       }
+       return(MAX_CLUSTERS - vp->v_clen);
+}
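
The 'sorted' local copy built at the top of cluster_try_push is a selection sort in disguise: each pass finds the non-empty cluster with the smallest start_pg, copies it into l_clusters, and consumes it by collapsing it to an empty range. That selection step, restated as a hedged standalone helper (pick_min_cluster is illustrative; the start_pg == last_pg empty-range convention comes from the code above):

        /*
         * Illustrative only: one selection pass over the vnode's cluster
         * array.  Returns the index of the non-empty cluster with the
         * smallest start_pg, or -1 once every cluster has been consumed.
         */
        static int
        pick_min_cluster(struct v_cluster *clusters, int clen)
        {
                int i, min_index = -1;

                for (i = 0; i < clen; i++) {
                        if (clusters[i].start_pg == clusters[i].last_pg)
                                continue;          /* empty: already consumed */
                        if (min_index == -1 ||
                            clusters[i].start_pg < clusters[min_index].start_pg)
                                min_index = i;
                }
                return (min_index);
        }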
+
+
+
+static int
+cluster_push_x(vp, EOF, first, last, can_delay)
+        struct vnode *vp;
+       off_t  EOF;
+       daddr_t first;
+       daddr_t last;
+       int    can_delay;
 {
        upl_page_info_t *pl;
        upl_t            upl;
@@ -2920,28 +3292,62 @@ cluster_push(vp)
        kern_return_t    kret;
 
 
-       if (!UBCINFOEXISTS(vp))
-               return(0);
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_START,
+                    vp->v_clen, first, last, EOF, 0);
 
-        if (vp->v_clen == 0 || (pages_in_upl = vp->v_lastw - vp->v_cstart) == 0)
-               return (0);
+       if ((pages_in_upl = last - first) == 0) {
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 0, 0, 0, 0);
+
+               return (1);
+       }
        upl_size = pages_in_upl * PAGE_SIZE;
-       upl_f_offset = ((off_t)vp->v_cstart) * PAGE_SIZE_64;
-       size = vp->v_ciosiz;
-       vp->v_clen = 0;
+       upl_f_offset = ((off_t)first) * PAGE_SIZE_64;
+
+       if (upl_f_offset + upl_size >= EOF) {
+
+               if (upl_f_offset >= EOF) {
+                       /*
+                        * must have truncated the file and missed 
+                        * clearing a dangling cluster (i.e. it's completely
+                        * beyond the new EOF
+                        */
+                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 1, 0, 0, 0);
 
-       if (size > upl_size || (upl_size - size) > PAGE_SIZE)
-               panic("cluster_push: v_ciosiz doesn't match size of cluster\n");
+                       return(1);
+               }
+               size = EOF - upl_f_offset;
 
+               upl_size = (size + (PAGE_SIZE - 1) ) & ~(PAGE_SIZE - 1);
+               pages_in_upl = upl_size / PAGE_SIZE;
+       } else {
+               if (can_delay && (pages_in_upl < (MAX_UPL_TRANSFER - (MAX_UPL_TRANSFER / 2))))
+                       return(0);
+               size = upl_size;
+       }
        kret = ubc_create_upl(vp, 
                                upl_f_offset,
                                upl_size,
                                &upl,
-                                       &pl,
-                                       UPL_FLAGS_NONE);
+                               &pl,
+                               UPL_RET_ONLY_DIRTY);
        if (kret != KERN_SUCCESS)
                panic("cluster_push: failed to get pagelist");
 
+       if (can_delay) {
+               int  num_of_dirty;
+       
+               for (num_of_dirty = 0, start_pg = 0; start_pg < pages_in_upl; start_pg++) {
+                       if (upl_valid_page(pl, start_pg) && upl_dirty_page(pl, start_pg))
+                               num_of_dirty++;
+               }
+               if (num_of_dirty < pages_in_upl / 2) {
+                       ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
+
+                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 0, 2, num_of_dirty, (pages_in_upl / 2), 0);
+
+                       return(0);
+               }
+       }
        last_pg = 0;
 
        while (size) {
@@ -2978,9 +3384,80 @@ cluster_push(vp)
                        vp->v_flag |= VTHROTTLED;
                        tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "cluster_push", 0);
                }
-               cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size, io_flags, (struct buf *)0);
+               cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size, vp->v_ciosiz, io_flags, (struct buf *)0, (struct clios *)0);
 
                size -= io_size;
        }
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 3, 0, 0, 0);
+
        return(1);
 }
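
The VTHROTTLED tsleep in the push loop above depends on the write-completion side, which lies outside this hunk: as buffers complete, v_numoutput is decremented and a throttled pusher is woken once enough writes have drained. A hedged sketch of that other half; the function name and the ASYNC_THROTTLE / 3 drain point are assumptions for illustration, not the actual xnu biodone path:

        /*
         * Hedged sketch only: how a completing write would release a pusher
         * throttled on vp->v_numoutput.  The drain threshold is assumed.
         */
        static void
        sketch_write_complete(struct vnode *vp)
        {
                vp->v_numoutput--;

                if ((vp->v_flag & VTHROTTLED) &&
                    vp->v_numoutput <= (ASYNC_THROTTLE / 3)) {
                        vp->v_flag &= ~VTHROTTLED;
                        wakeup((caddr_t)&vp->v_numoutput);
                }
        }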
+
+
+
+static int
+cluster_align_phys_io(struct vnode *vp, struct uio *uio, addr64_t usr_paddr, int xsize, int devblocksize, int flags)
+{
+        struct iovec     *iov;
+        upl_page_info_t  *pl;
+        upl_t            upl;
+        addr64_t            ubc_paddr;
+        kern_return_t    kret;
+        int              error = 0;
+
+        iov = uio->uio_iov;
+
+        kret = ubc_create_upl(vp,
+                              uio->uio_offset & ~PAGE_MASK_64,
+                              PAGE_SIZE,
+                              &upl,
+                              &pl,
+                              UPL_FLAGS_NONE);
+
+        if (kret != KERN_SUCCESS)
+                return(EINVAL);
+
+        if (!upl_valid_page(pl, 0)) {
+                /*
+                 * issue a synchronous read to cluster_io
+                 */
+                error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE, devblocksize,
+                                  CL_READ, (struct buf *)0, (struct clios *)0);
+                if (error) {
+                          ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
+
+                          return(error);
+                }
+        }
+        ubc_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)(uio->uio_offset & PAGE_MASK_64);
+
+/*
+ *             NOTE:  There is no prototype for copypv in BSD.  The prototype, along with
+ *             the definitions of cppvPsrc, cppvPsnk, cppvFsnk, and cppvFsrc, can be found
+ *             in osfmk/ppc/mappings.h.  That header is not included here because there
+ *             appears to be no way to do so without also exporting it to kexts.
+ */
+               if (flags & CL_READ)
+//                     copypv(ubc_paddr, usr_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsnk);    /* Copy physical to physical and flush the destination */
+                       copypv(ubc_paddr, usr_paddr, xsize,        2 |        1 |        4);    /* Copy physical to physical and flush the destination */
+               else
+//                     copypv(ubc_paddr, usr_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsrc);    /* Copy physical to physical and flush the source */
+                       copypv(ubc_paddr, usr_paddr, xsize,        2 |        1 |        8);    /* Copy physical to physical and flush the source */
+       
+               if ( !(flags & CL_READ) || upl_dirty_page(pl, 0)) {
+                       /*
+                        * issue a synchronous write to cluster_io
+                        */
+                       error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE, devblocksize,
+                               0, (struct buf *)0, (struct clios *)0);
+               }
+               if (error == 0) {
+                       uio->uio_offset += xsize;
+                       iov->iov_base   += xsize;
+                       iov->iov_len    -= xsize;
+                       uio->uio_resid  -= xsize;
+               }
+               ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
+       
+               return (error);
+}
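
Decoding the literal flag values in the copypv calls above against their commented-out symbolic forms gives the bit assignments below; this is a hedged restatement for the reader (the SKETCH_ prefix marks them as illustrative), since the authoritative definitions live in osfmk/ppc/mappings.h, which per the NOTE cannot be included here:

        /* hedged restatement of the cppv bits, derived from the literals above */
        #define SKETCH_cppvPsnk  1     /* sink (destination) address is physical */
        #define SKETCH_cppvPsrc  2     /* source address is physical             */
        #define SKETCH_cppvFsnk  4     /* flush the destination after the copy   */
        #define SKETCH_cppvFsrc  8     /* flush the source after the copy        */

        /* read  path: 2 | 1 | 4  ==  cppvPsrc | cppvPsnk | cppvFsnk */
        /* write path: 2 | 1 | 8  ==  cppvPsrc | cppvPsnk | cppvFsrc */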