apple/xnu.git (xnu-1228) blobdiff: bsd/vfs/vfs_cluster.c
index df2e7375144c2142f50f58f754aeed59057af4ee..8d3657909e9a96051b082c955932698f8e136eab 100644 (file)
@@ -1,24 +1,29 @@
-
 /*
- * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
 /*
  */
 
 #include <sys/param.h>
-#include <sys/proc.h>
-#include <sys/buf.h>
-#include <sys/vnode.h>
-#include <sys/mount.h>
+#include <sys/proc_internal.h>
+#include <sys/buf_internal.h>
+#include <sys/mount_internal.h>
+#include <sys/vnode_internal.h>
 #include <sys/trace.h>
 #include <sys/malloc.h>
+#include <sys/time.h>
+#include <sys/kernel.h>
 #include <sys/resourcevar.h>
+#include <sys/uio_internal.h>
 #include <libkern/libkern.h>
+#include <machine/machine_routines.h>
+
+#include <sys/ubc_internal.h>
+#include <vm/vnode_pager.h>
+
+#include <mach/mach_types.h>
+#include <mach/memory_object_types.h>
+#include <mach/vm_map.h>
+#include <mach/upl.h>
 
-#include <sys/ubc.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
 #include <vm/vm_pageout.h>
 
 #include <sys/kdebug.h>
 
-#define CL_READ      0x01
-#define CL_ASYNC     0x02
-#define CL_COMMIT    0x04
-#define CL_PAGEOUT   0x10
-#define CL_AGE       0x20
-#define CL_DUMP      0x40
-#define CL_NOZERO    0x80
-#define CL_PAGEIN    0x100
-#define CL_DEV_MEMORY 0x200
-
-static void cluster_zero(upl_t upl, vm_offset_t   upl_offset,
-               int size, struct buf *bp);
-static int cluster_read_x(struct vnode *vp, struct uio *uio,
-               off_t filesize, int devblocksize, int flags);
-static int cluster_write_x(struct vnode *vp, struct uio *uio,
-               off_t oldEOF, off_t newEOF, off_t headOff,
-               off_t tailOff, int devblocksize, int flags);
-static int cluster_nocopy_read(struct vnode *vp, struct uio *uio,
-               off_t filesize, int devblocksize, int flags);
-static int cluster_nocopy_write(struct vnode *vp, struct uio *uio,
-               off_t newEOF, int devblocksize, int flags);
-static int cluster_phys_read(struct vnode *vp, struct uio *uio,
-               off_t filesize);
-static int cluster_phys_write(struct vnode *vp, struct uio *uio, off_t newEOF);
-static int cluster_push_x(struct vnode *vp, off_t EOF, daddr_t first, daddr_t last, int can_delay);
-static int cluster_try_push(struct vnode *vp, off_t newEOF, int can_delay, int push_all);
+#define CL_READ                0x01
+#define CL_ASYNC       0x02
+#define CL_COMMIT      0x04
+#define CL_PAGEOUT     0x10
+#define CL_AGE         0x20
+#define CL_NOZERO      0x40
+#define CL_PAGEIN      0x80
+#define CL_DEV_MEMORY  0x100
+#define CL_PRESERVE    0x200
+#define CL_THROTTLE    0x400
+#define CL_KEEPCACHED  0x800
+#define CL_DIRECT_IO   0x1000
+#define CL_PASSIVE     0x2000
+
+
+struct clios {
+        u_int  io_completed;       /* amount of io that has currently completed */
+        u_int  io_issued;          /* amount of io that was successfully issued */
+        int    io_error;           /* error code of first error encountered */
+        int    io_wanted;          /* someone is sleeping waiting for a change in state */
+};
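
The direct and contiguous I/O paths below stream multiple cluster_io() calls against a single struct clios and then wait for io_completed to catch up with io_issued. A minimal sketch of that wait, assuming the cl_mtxp / io_wanted / msleep convention that cluster_iodone() uses for its wakeup (the function name here is illustrative, not part of this file):

static void
clios_drain(struct clios *iostate)
{
	/* illustrative sketch only: pairs with the wakeup done in cluster_iodone() */
	lck_mtx_lock(cl_mtxp);

	while (iostate->io_issued != iostate->io_completed) {
		iostate->io_wanted = 1;		/* ask cluster_iodone() for a wakeup */
		msleep((caddr_t)&iostate->io_wanted, cl_mtxp, PRIBIO + 1, "clios_drain", NULL);
	}
	lck_mtx_unlock(cl_mtxp);
}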
+
+static lck_grp_t       *cl_mtx_grp;
+static lck_attr_t      *cl_mtx_attr;
+static lck_grp_attr_t   *cl_mtx_grp_attr;
+static lck_mtx_t       *cl_mtxp;
+
+
+#define        IO_UNKNOWN      0
+#define        IO_DIRECT       1
+#define IO_CONTIG      2
+#define IO_COPY                3
+
+#define        PUSH_DELAY      0x01
+#define PUSH_ALL       0x02
+#define        PUSH_SYNC       0x04
+
+
+static void cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset);
+static void cluster_wait_IO(buf_t cbp_head, int async);
+static void cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait);
+
+static int cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length);
+
+static int cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
+                     int flags, buf_t real_bp, struct clios *iostate, int (*)(buf_t, void *), void *callback_arg);
+static int cluster_iodone(buf_t bp, void *callback_arg);
+static int cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags);
+static int cluster_hard_throttle_on(vnode_t vp);
+
+static void cluster_syncup(vnode_t vp, off_t newEOF, int (*)(buf_t, void *), void *callback_arg);
+
+static void cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int flags);
+static int cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference);
+
+static int cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size,  off_t filesize, int flags,
+                            int (*)(buf_t, void *), void *callback_arg);
+static int cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
+                              int flags, int (*)(buf_t, void *), void *callback_arg);
+static int cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
+                              int (*)(buf_t, void *), void *callback_arg, int flags);
+
+static int cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF,
+                             off_t headOff, off_t tailOff, int flags, int (*)(buf_t, void *), void *callback_arg);
+static int cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF,
+                               int *write_type, u_int32_t *write_length, int flags, int (*)(buf_t, void *), void *callback_arg);
+static int cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF,
+                               int *write_type, u_int32_t *write_length, int (*)(buf_t, void *), void *callback_arg, int bflag);
+
+static int cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*)(buf_t, void *), void *callback_arg);
+
+static int     cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag);
+static void    cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *ra, int (*callback)(buf_t, void *), void *callback_arg, int bflag);
+
+static int     cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg);
+
+static int     cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int (*)(buf_t, void *), void *callback_arg);
+
+static void    sparse_cluster_switch(struct cl_writebehind *, vnode_t vp, off_t EOF, int (*)(buf_t, void *), void *callback_arg);
+static void    sparse_cluster_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int (*)(buf_t, void *), void *callback_arg);
+static void    sparse_cluster_add(struct cl_writebehind *, vnode_t vp, struct cl_extent *, off_t EOF, int (*)(buf_t, void *), void *callback_arg);
+
+static kern_return_t vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp);
+static kern_return_t vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp);
+static kern_return_t vfs_drt_control(void **cmapp, int op_type);
+
+int    is_file_clean(vnode_t, off_t);
+
+/*
+ * limit the internal I/O size so that we
+ * can represent it in a 32 bit int
+ */
+#define MAX_IO_REQUEST_SIZE    (1024 * 1024 * 256)
+#define MAX_IO_CONTIG_SIZE     (1024 * 1024 * 8)
+#define MAX_VECTS      16
+
+/*
+ * note:  MAX_CLUSTER_SIZE CANNOT be larger than MAX_UPL_TRANSFER
+ */
+#define MAX_CLUSTER_SIZE       (MAX_UPL_TRANSFER)
+#define MAX_PREFETCH           (MAX_CLUSTER_SIZE * PAGE_SIZE * 2)
+#define MIN_DIRECT_WRITE_SIZE  (4 * PAGE_SIZE)
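
For scale: assuming 4 KB pages and the usual MAX_UPL_TRANSFER of 256, MAX_CLUSTER_SIZE * PAGE_SIZE works out to 1 MB per cluster I/O, MAX_PREFETCH to a 2 MB read-ahead window, and MIN_DIRECT_WRITE_SIZE to 16 KB.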
+
 
+int speculative_reads_disabled = 0;
 
 /*
  * throttle the number of async writes that
  * can be outstanding on a single vnode
  * before we issue a synchronous write 
  */
-#define ASYNC_THROTTLE  9
+#define HARD_THROTTLE_MAXCNT   0
+#define HARD_THROTTLE_MAXSIZE  (64 * 1024)
+
+int hard_throttle_on_root = 0;
+struct timeval priority_IO_timestamp_for_root;
+
+
+void
+cluster_init(void) {
+        /*
+        * allocate lock group attribute and group
+        */
+        cl_mtx_grp_attr = lck_grp_attr_alloc_init();
+       cl_mtx_grp = lck_grp_alloc_init("cluster I/O", cl_mtx_grp_attr);
+               
+       /*
+        * allocate the lock attribute
+        */
+       cl_mtx_attr = lck_attr_alloc_init();
+
+       /*
+        * allocate and initialize mutexes used to protect updates and waits
+        * on the cluster_io context
+        */
+       cl_mtxp = lck_mtx_alloc_init(cl_mtx_grp, cl_mtx_attr);
+
+       if (cl_mtxp == NULL)
+               panic("cluster_init: failed to allocate cl_mtxp");
+}
+
+
+
+#define CLW_ALLOCATE           0x01
+#define CLW_RETURNLOCKED       0x02
+#define CLW_IONOCACHE          0x04
+#define CLW_IOPASSIVE  0x08
+
+/*
+ * if the read ahead context doesn't yet exist,
+ * allocate and initialize it...
+ * the vnode lock serializes multiple callers
+ * during the actual assignment... first one
+ * to grab the lock wins... the other callers
+ * will release the now unnecessary storage
+ * 
+ * once the context is present, try to grab (but don't block on)
+ * the lock associated with it... if someone
+ * else currently owns it, then the read
+ * will run without read-ahead.  this allows
+ * multiple readers to run in parallel and
+ * since there's only 1 read ahead context,
+ * there's no real loss in only allowing 1
+ * reader to have read-ahead enabled.
+ */
+static struct cl_readahead *
+cluster_get_rap(vnode_t vp)
+{
+        struct ubc_info                *ubc;
+       struct cl_readahead     *rap;
+
+       ubc = vp->v_ubcinfo;
+
+        if ((rap = ubc->cl_rahead) == NULL) {
+               MALLOC_ZONE(rap, struct cl_readahead *, sizeof *rap, M_CLRDAHEAD, M_WAITOK);
+
+               bzero(rap, sizeof *rap);
+               rap->cl_lastr = -1;
+               lck_mtx_init(&rap->cl_lockr, cl_mtx_grp, cl_mtx_attr);
+
+               vnode_lock(vp);
+               
+               if (ubc->cl_rahead == NULL)
+                       ubc->cl_rahead = rap;
+               else {
+                       lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp);
+                       FREE_ZONE((void *)rap, sizeof *rap, M_CLRDAHEAD);
+                       rap = ubc->cl_rahead;
+               }
+               vnode_unlock(vp);
+       }
+       if (lck_mtx_try_lock(&rap->cl_lockr) == TRUE)
+               return(rap);
+       
+       return ((struct cl_readahead *)NULL);
+}
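
A sketch of how a read path consumes this, roughly along the lines of cluster_read_copy() later in this file (extent, filesize, callback, callback_arg and bflag stand in for the caller's locals):

	struct cl_readahead *rap;

	if ((rap = cluster_get_rap(vp)) != NULL) {
		/* we won the try-lock, so read-ahead runs for this request */
		cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);

		lck_mtx_unlock(&rap->cl_lockr);
	}
	/* rap == NULL just means another reader owns read-ahead; read without it */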
+
+
+/*
+ * if the write behind context doesn't yet exist,
+ * and CLW_ALLOCATE is specified, allocate and initialize it...
+ * the vnode lock serializes multiple callers
+ * during the actual assignment... first one
+ * to grab the lock wins... the other callers
+ * will release the now unnecessary storage
+ * 
+ * if CLW_RETURNLOCKED is set, grab (blocking if necessary)
+ * the lock associated with the write behind context before
+ * returning
+ */
+
+static struct cl_writebehind *
+cluster_get_wbp(vnode_t vp, int flags)
+{
+        struct ubc_info *ubc;
+       struct cl_writebehind *wbp;
+
+       ubc = vp->v_ubcinfo;
+
+        if ((wbp = ubc->cl_wbehind) == NULL) {
+
+               if ( !(flags & CLW_ALLOCATE))
+                       return ((struct cl_writebehind *)NULL);
+         
+               MALLOC_ZONE(wbp, struct cl_writebehind *, sizeof *wbp, M_CLWRBEHIND, M_WAITOK);
+
+               bzero(wbp, sizeof *wbp);
+               lck_mtx_init(&wbp->cl_lockw, cl_mtx_grp, cl_mtx_attr);
+
+               vnode_lock(vp);
+               
+               if (ubc->cl_wbehind == NULL)
+                       ubc->cl_wbehind = wbp;
+               else {
+                       lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp);
+                       FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND);
+                       wbp = ubc->cl_wbehind;
+               }
+               vnode_unlock(vp);
+       }
+       if (flags & CLW_RETURNLOCKED)
+               lck_mtx_lock(&wbp->cl_lockw);
+
+       return (wbp);
+}
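
The write path typically wants the context allocated on demand and held locked on return, roughly as follows (a sketch; the actual clustering work done under the lock is elided):

	struct cl_writebehind *wbp;

	/* allocate the context if needed and return with cl_lockw held */
	wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);

	/* ... update wbp->cl_number and the per-vnode cluster state under the lock ... */

	lck_mtx_unlock(&wbp->cl_lockw);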
+
+
+static void
+cluster_syncup(vnode_t vp, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg)
+{
+       struct cl_writebehind *wbp;
+
+       if ((wbp = cluster_get_wbp(vp, 0)) != NULL) {
+         
+               if (wbp->cl_number) {
+                       lck_mtx_lock(&wbp->cl_lockw);
+
+                       cluster_try_push(wbp, vp, newEOF, PUSH_ALL | PUSH_SYNC, callback, callback_arg);
+
+                       lck_mtx_unlock(&wbp->cl_lockw);
+               }
+       }
+}
+
+
+static int 
+cluster_hard_throttle_on(vnode_t vp)
+{
+        static struct timeval hard_throttle_maxelapsed = { 0, 200000 };
+
+       if (vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV) {
+               struct timeval elapsed;
+
+               if (hard_throttle_on_root)
+                       return(1);
+
+               microuptime(&elapsed);
+               timevalsub(&elapsed, &priority_IO_timestamp_for_root);
+
+               if (timevalcmp(&elapsed, &hard_throttle_maxelapsed, <))
+                       return(1);
+       }
+       return(0);
+}
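
In effect only root-device I/O is ever hard-throttled here: the routine reports throttling while hard_throttle_on_root is set, or while less than hard_throttle_maxelapsed (200 ms) has passed since priority_IO_timestamp_for_root was last stamped. For example, a root-device write issued 150 ms after that stamp returns 1; the same write at 250 ms returns 0.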
+
+
+static int
+cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags)
+{
+        int upl_abort_code = 0;
+       int page_in  = 0;
+       int page_out = 0;
+
+       if (io_flags & B_PHYS)
+               /*
+                * direct write of any flavor, or a direct read that wasn't aligned
+                */
+               ubc_upl_commit_range(upl, upl_offset, abort_size, UPL_COMMIT_FREE_ON_EMPTY);
+       else {
+               if (io_flags & B_PAGEIO) {
+                       if (io_flags & B_READ)
+                               page_in  = 1;
+                       else
+                               page_out = 1;
+               }
+               if (io_flags & B_CACHE)
+                       /*
+                        * leave pages in the cache unchanged on error
+                        */
+                       upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
+               else if (page_out && (error != ENXIO))
+                       /*
+                        * transient error... leave pages unchanged
+                        */
+                       upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
+               else if (page_in)
+                       upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
+               else
+                       upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
+
+               ubc_upl_abort_range(upl, upl_offset, abort_size, upl_abort_code);
+       }
+       return (upl_abort_code);
+}
+
 
 static int
-cluster_iodone(bp)
-       struct buf *bp;
+cluster_iodone(buf_t bp, void *callback_arg)
 {
-        int         b_flags;
-        int         error;
-       int         total_size;
-       int         total_resid;
-       int         upl_offset;
-       int         zero_offset;
-       upl_t       upl;
-       struct buf *cbp;
-       struct buf *cbp_head;
-       struct buf *cbp_next;
-       struct buf *real_bp;
-       struct vnode *vp;
-       int         commit_size;
-       int         pg_offset;
-
-
-       cbp_head = (struct buf *)(bp->b_trans_head);
+        int    b_flags;
+        int    error;
+       int     total_size;
+       int     total_resid;
+       int     upl_offset;
+       int     zero_offset;
+       int     pg_offset = 0;
+        int    commit_size = 0;
+        int    upl_flags = 0;
+       int     transaction_size = 0;
+       upl_t   upl;
+       buf_t   cbp;
+       buf_t   cbp_head;
+       buf_t   cbp_next;
+       buf_t   real_bp;
+       struct  clios *iostate;
+       boolean_t       transaction_complete = FALSE;
+
+       cbp_head = (buf_t)(bp->b_trans_head);
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_START,
                     (int)cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
@@ -141,8 +451,16 @@ cluster_iodone(bp)
                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
                                     (int)cbp_head, (int)cbp, cbp->b_bcount, cbp->b_flags, 0);
 
-                       return 0;
+                       return 0;
                }
+               if (cbp->b_flags & B_EOT)
+                       transaction_complete = TRUE;
+       }
+       if (transaction_complete == FALSE) {
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
+                            (int)cbp_head, 0, 0, 0, 0);
+
+               return 0;
        }
        error       = 0;
        total_size  = 0;
@@ -150,16 +468,16 @@ cluster_iodone(bp)
 
        cbp        = cbp_head;
        upl_offset = cbp->b_uploffset;
-       upl        = cbp->b_pagelist;
+       upl        = cbp->b_upl;
        b_flags    = cbp->b_flags;
        real_bp    = cbp->b_real_bp;
-       vp         = cbp->b_vp;
        zero_offset= cbp->b_validend;
+       iostate    = (struct clios *)cbp->b_iostate;
 
-       while (cbp) {
-               if (cbp->b_vectorcount > 1)
-                       _FREE(cbp->b_vectorlist, M_SEGMENT);
+       if (real_bp)
+               real_bp->b_dev = cbp->b_dev;
 
+       while (cbp) {
                if ((cbp->b_flags & B_ERROR) && error == 0)
                        error = cbp->b_error;
 
@@ -168,164 +486,352 @@ cluster_iodone(bp)
 
                cbp_next = cbp->b_trans_next;
 
-               free_io_buf(cbp);
+               if (cbp_next == NULL)
+                       /*
+                        * compute the overall size of the transaction
+                        * in case we created one that has 'holes' in it
+                        * 'total_size' represents the amount of I/O we
+                        * did, not the span of the transaction w/r to the UPL
+                        */
+                       transaction_size = cbp->b_uploffset + cbp->b_bcount - upl_offset;
+
+               if (cbp != cbp_head)
+                       free_io_buf(cbp);
 
                cbp = cbp_next;
        }
-       if ((vp->v_flag & VTHROTTLED) && (vp->v_numoutput <= (ASYNC_THROTTLE / 3))) {
-               vp->v_flag &= ~VTHROTTLED;
-               wakeup((caddr_t)&vp->v_numoutput);
+       if (error == 0 && total_resid)
+               error = EIO;
+
+       if (error == 0) {
+               int     (*cliodone_func)(buf_t, void *) = (int (*)(buf_t, void *))(cbp_head->b_cliodone);
+
+               if (cliodone_func != NULL) {
+                       cbp_head->b_bcount = transaction_size;
+
+                       error = (*cliodone_func)(cbp_head, callback_arg);
+               }
        }
        if (zero_offset)
                cluster_zero(upl, zero_offset, PAGE_SIZE - (zero_offset & PAGE_MASK), real_bp);
 
+        free_io_buf(cbp_head);
+
+       if (iostate) {
+               int need_wakeup = 0;
+
+               /*
+                * someone has issued multiple I/Os asynchronously
+                * and is waiting for them to complete (streaming)
+                */
+               lck_mtx_lock_spin(cl_mtxp);
+
+               if (error && iostate->io_error == 0)
+                       iostate->io_error = error;
+
+               iostate->io_completed += total_size;
+
+               if (iostate->io_wanted) {
+                       /*
+                        * someone is waiting for the state of
+                        * this io stream to change
+                        */
+                       iostate->io_wanted = 0;
+                       need_wakeup = 1;
+               }
+               lck_mtx_unlock(cl_mtxp);
+
+               if (need_wakeup)
+                       wakeup((caddr_t)&iostate->io_wanted);
+       }
+
+       if (b_flags & B_COMMIT_UPL) {
+
+               pg_offset   = upl_offset & PAGE_MASK;
+               commit_size = (pg_offset + transaction_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
+
+               if (error)
+                       upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, commit_size, error, b_flags);
+               else {
+                       upl_flags = UPL_COMMIT_FREE_ON_EMPTY;
+
+                       if ((b_flags & B_PHYS) && (b_flags & B_READ)) 
+                               upl_flags |= UPL_COMMIT_SET_DIRTY;
+
+                       if (b_flags & B_AGE)
+                               upl_flags |= UPL_COMMIT_INACTIVATE;
+
+                       ubc_upl_commit_range(upl, upl_offset - pg_offset, commit_size, upl_flags);
+               }
+       }
        if ((b_flags & B_NEED_IODONE) && real_bp) {
                if (error) {
-                       real_bp->b_flags |= B_ERROR;
+                       real_bp->b_flags |= B_ERROR;
                        real_bp->b_error = error;
                }
                real_bp->b_resid = total_resid;
 
-               biodone(real_bp);
+               buf_biodone(real_bp);
        }
-       if (error == 0 && total_resid)
-               error = EIO;
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
+                    (int)upl, upl_offset - pg_offset, commit_size, (error << 24) | upl_flags, 0);
 
-       if (b_flags & B_COMMIT_UPL) {
-               pg_offset   = upl_offset & PAGE_MASK;
-               commit_size = (((pg_offset + total_size) + (PAGE_SIZE - 1)) / PAGE_SIZE) * PAGE_SIZE;
+       return (error);
+}
 
-               if (error || (b_flags & B_NOCACHE)) {
-                       int upl_abort_code;
 
-                       if ((b_flags & B_PAGEOUT) && (error != ENXIO)) /* transient error */
-                               upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
-                       else if (b_flags & B_PGIN)
-                               upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
-                       else
-                               upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
+void
+cluster_zero(upl_t upl, vm_offset_t upl_offset, int size, buf_t bp)
+{
 
-                       ubc_upl_abort_range(upl, upl_offset - pg_offset, commit_size,
-                                       upl_abort_code);
-                       
-                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
-                                    (int)upl, upl_offset - pg_offset, commit_size,
-                                    0x80000000|upl_abort_code, 0);
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_START,
+                    upl_offset, size, (int)bp, 0, 0);
+
+       if (bp == NULL || bp->b_datap == 0) {
+               upl_page_info_t *pl;
+               addr64_t        zero_addr;
 
+               pl = ubc_upl_pageinfo(upl);
+
+               if (upl_device_page(pl) == TRUE) {
+                       zero_addr = ((addr64_t)upl_phys_page(pl, 0) << 12) + upl_offset;
+
+                       bzero_phys_nc(zero_addr, size);
                } else {
-                       int upl_commit_flags = UPL_COMMIT_FREE_ON_EMPTY;
+                       while (size) {
+                               int     page_offset;
+                               int     page_index;
+                               int     zero_cnt;
 
-                       if ( !(b_flags & B_PAGEOUT))
-                               upl_commit_flags |= UPL_COMMIT_CLEAR_DIRTY;
-                       if (b_flags & B_AGE)
-                               upl_commit_flags |= UPL_COMMIT_INACTIVATE;
+                               page_index  = upl_offset / PAGE_SIZE;
+                               page_offset = upl_offset & PAGE_MASK;
+
+                               zero_addr = ((addr64_t)upl_phys_page(pl, page_index) << 12) + page_offset;
+                               zero_cnt  = min(PAGE_SIZE - page_offset, size);
 
-                       ubc_upl_commit_range(upl, upl_offset - pg_offset, commit_size,
-                                       upl_commit_flags);
+                               bzero_phys(zero_addr, zero_cnt);
 
-                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
-                                    (int)upl, upl_offset - pg_offset, commit_size,
-                                    upl_commit_flags, 0);
+                               size       -= zero_cnt;
+                               upl_offset += zero_cnt;
+                       }
                }
-       } else 
-               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
-                            (int)upl, upl_offset, 0, error, 0);
+       } else
+               bzero((caddr_t)((vm_offset_t)bp->b_datap + upl_offset), size);
 
-       return (error);
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_END,
+                    upl_offset, size, 0, 0, 0);
 }
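
As a worked example (assuming 4 KB pages, hence the << 12 when forming the physical address): zeroing size = 0x1770 bytes at upl_offset = 0x1800 takes two passes through the loop — first page_index = 1, page_offset = 0x800, zero_cnt = min(0x800, 0x1770) = 0x800, then upl_offset = 0x2000, size = 0xF70, page_index = 2, page_offset = 0, zero_cnt = 0xF70 — i.e. two bzero_phys() calls in total.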
 
 
 static void
-cluster_zero(upl, upl_offset, size, bp)
-       upl_t         upl;
-       vm_offset_t   upl_offset;
-       int           size;
-       struct buf   *bp;
+cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset)
 {
-        vm_offset_t   io_addr = 0;
-       int           must_unmap = 0;
-       kern_return_t kret;
+        cbp_head->b_validend = zero_offset;
+        cbp_tail->b_flags |= B_EOT;
+}
 
-       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_NONE,
-                    upl_offset, size, (int)bp, 0, 0);
+static void
+cluster_wait_IO(buf_t cbp_head, int async)
+{
+        buf_t  cbp;
 
-       if (bp == NULL || bp->b_data == NULL) {
-               kret = ubc_upl_map(upl, &io_addr);
-               
-               if (kret != KERN_SUCCESS)
-                       panic("cluster_zero: ubc_upl_map() failed with (%d)", kret);
-               if (io_addr == 0) 
-                       panic("cluster_zero: ubc_upl_map() mapped 0");
+       if (async) {
+               /*
+                * async callback completion will not normally
+                * generate a wakeup upon I/O completion...
+                * by setting BL_WANTED, we will force a wakeup
+                * to occur as any outstanding I/Os complete... 
+                * I/Os already completed will have BL_CALLDONE already
+                * set and we won't block in buf_biowait_callback..
+                * note that we're actually waiting for the bp to have
+                * completed the callback function... only then
+                * can we safely take back ownership of the bp...
+                * we need the main buf mutex in order to safely
+                * update b_lflags
+                */
+               buf_list_lock();
 
-               must_unmap = 1;
-       } else
-               io_addr = (vm_offset_t)bp->b_data;
-       bzero((caddr_t)(io_addr + upl_offset), size);
-       
-       if (must_unmap) {
-               kret = ubc_upl_unmap(upl);
+               for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next)
+                     cbp->b_lflags |= BL_WANTED;
 
-               if (kret != KERN_SUCCESS)
-                       panic("cluster_zero: kernel_upl_unmap failed");
+               buf_list_unlock();
+       }
+       for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
+               if (async)
+                       buf_biowait_callback(cbp);
+               else
+                       buf_biowait(cbp);
+       }
+}
+
+static void
+cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait)
+{
+        buf_t  cbp;
+       int     error;
+
+       /*
+        * cluster_complete_transaction will
+        * only be called if we've issued a complete chain in synchronous mode
+        * or, we've already done a cluster_wait_IO on an incomplete chain
+        */
+        if (needwait) {
+               for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next)
+                       buf_biowait(cbp);
+       }
+       error = cluster_iodone(*cbp_head, callback_arg);
+
+       if ( !(flags & CL_ASYNC) && error && *retval == 0) {
+               if (((flags & (CL_PAGEOUT | CL_KEEPCACHED)) != CL_PAGEOUT) || (error != ENXIO))
+                       *retval = error;
        }
+       *cbp_head = (buf_t)NULL;
 }
 
+
 static int
-cluster_io(vp, upl, upl_offset, f_offset, non_rounded_size, devblocksize, flags, real_bp)
-       struct vnode *vp;
-       upl_t         upl;
-       vm_offset_t   upl_offset;
-       off_t         f_offset;
-       int           non_rounded_size;
-       int           devblocksize;
-       int           flags;
-       struct buf   *real_bp;
+cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
+          int flags, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
 {
-       struct buf   *cbp;
-       struct iovec *iovp;
-       u_int           size;
-       int           io_flags;
-       int           error = 0;
-       int           retval = 0;
-       struct buf   *cbp_head = 0;
-       struct buf   *cbp_tail = 0;
-       upl_page_info_t *pl;
-       int buf_count = 0;
-       int pg_count;
-       int pg_offset;
-       u_int max_iosize;
-       u_int max_vectors;
-       int priv;
-       int zero_offset = 0;
+       buf_t   cbp;
+       u_int   size;
+       u_int   io_size;
+       int     io_flags;
+       int     bmap_flags;
+       int     error = 0;
+       int     retval = 0;
+       buf_t   cbp_head = NULL;
+       buf_t   cbp_tail = NULL;
+       int     trans_count = 0;
+       int     max_trans_count;
+       u_int   pg_count;
+       int     pg_offset;
+       u_int   max_iosize;
+       u_int   max_vectors;
+       int     priv;
+       int     zero_offset = 0;
+       int     async_throttle = 0;
+       mount_t mp;
+       vm_offset_t upl_end_offset;
+       boolean_t   need_EOT = FALSE;
+
+       /*
+        * we currently don't support buffers larger than a page
+        */
+       if (real_bp && non_rounded_size > PAGE_SIZE)
+               panic("%s(): Called with real buffer of size %d bytes which "
+                               "is greater than the maximum allowed size of "
+                               "%d bytes (the system PAGE_SIZE).\n",
+                               __FUNCTION__, non_rounded_size, PAGE_SIZE);
+
+       mp = vp->v_mount;
+
+       /*
+        * we don't want to do any funny rounding of the size for IO requests
+        * coming through the DIRECT or CONTIGUOUS paths...  those pages don't
+        * belong to us... we can't extend (nor do we need to) the I/O to fill
+        * out a page
+        */
+       if (mp->mnt_devblocksize > 1 && !(flags & (CL_DEV_MEMORY | CL_DIRECT_IO))) {
+               /*
+                * round the requested size up so that this I/O ends on a
+                * page boundary in case this is a 'write'... if the filesystem
+                * has blocks allocated to back the page beyond the EOF, we want to
+                * make sure to write out the zero's that are sitting beyond the EOF
+                * so that in case the filesystem doesn't explicitly zero this area
+                * if a hole is created via a lseek/write beyond the current EOF,
+                * it will return zeros when it's read back from the disk.  If the
+                * physical allocation doesn't extend for the whole page, we'll
+                * only write/read from the disk up to the end of this allocation
+                * via the extent info returned from the VNOP_BLOCKMAP call.
+                */
+               pg_offset = upl_offset & PAGE_MASK;
+
+               size = (((non_rounded_size + pg_offset) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - pg_offset;
+       } else {
+               /*
+                * anyone advertising a blocksize of 1 byte probably
+                * can't deal with us rounding up the request size
+                * AFP is one such filesystem/device
+                */
+               size = non_rounded_size;
+       }
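
For example (assuming 4 KB pages), with upl_offset = 0x1200 and non_rounded_size = 0xE40: pg_offset = 0x200, so size = ((0xE40 + 0x200 + 0xFFF) & ~PAGE_MASK) - 0x200 = 0x2000 - 0x200 = 0x1E00, and the rounded I/O ends at upl_offset + size = 0x3000, a page boundary.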
+       upl_end_offset = upl_offset + size;
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START, (int)f_offset, size, upl_offset, flags, 0);
+
+       /*
+        * Set the maximum transaction size to the maximum desired number of
+        * buffers.
+        */
+       max_trans_count = 8;
+       if (flags & CL_DEV_MEMORY)
+               max_trans_count = 16;
 
        if (flags & CL_READ) {
-               io_flags = (B_VECTORLIST | B_READ);
+               io_flags = B_READ;
+               bmap_flags = VNODE_READ;
 
-               vfs_io_attributes(vp, B_READ, &max_iosize, &max_vectors);
+               max_iosize  = mp->mnt_maxreadcnt;
+               max_vectors = mp->mnt_segreadcnt;
        } else {
-               io_flags = (B_VECTORLIST | B_WRITEINPROG);
+               io_flags = B_WRITE;
+               bmap_flags = VNODE_WRITE;
 
-               vfs_io_attributes(vp, B_WRITE, &max_iosize, &max_vectors);
+               max_iosize  = mp->mnt_maxwritecnt;
+               max_vectors = mp->mnt_segwritecnt;
        }
-       pl = ubc_upl_pageinfo(upl);
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_NONE, max_iosize, max_vectors, mp->mnt_devblocksize, 0, 0);
 
-       if (flags & CL_ASYNC)
-               io_flags |= (B_CALL | B_ASYNC);
-       if (flags & CL_AGE)
-               io_flags |= B_AGE;
-       if (flags & CL_DUMP)
-               io_flags |= B_NOCACHE;
-       if (flags & CL_PAGEIN)
-               io_flags |= B_PGIN;
+       /*
+        * make sure the maximum iosize is a
+        * multiple of the page size
+        */
+       max_iosize  &= ~PAGE_MASK;
 
-       if (devblocksize)
-               size = (non_rounded_size + (devblocksize - 1)) & ~(devblocksize - 1);
-       else
-               size = non_rounded_size;
+       /*
+        * Ensure the maximum iosize is sensible.
+        */
+       if (!max_iosize)
+               max_iosize = PAGE_SIZE;
+
+       if (flags & CL_THROTTLE) {
+               if ( !(flags & CL_PAGEOUT) && cluster_hard_throttle_on(vp)) {
+                       if (max_iosize > HARD_THROTTLE_MAXSIZE)
+                               max_iosize = HARD_THROTTLE_MAXSIZE;
+                       async_throttle = HARD_THROTTLE_MAXCNT;
+               } else {
+                       if ( (flags & CL_DEV_MEMORY) )
+                               async_throttle = VNODE_ASYNC_THROTTLE;
+                       else {
+                               u_int max_cluster;
+                               
+                               if (max_iosize > (MAX_CLUSTER_SIZE * PAGE_SIZE))
+                                       max_cluster = (MAX_CLUSTER_SIZE * PAGE_SIZE);
+                               else
+                                       max_cluster = max_iosize;
 
+                               if (size < max_cluster)
+                                       max_cluster = size;
 
-       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START,
-                    (int)f_offset, size, upl_offset, flags, 0);
+                               async_throttle = min(VNODE_ASYNC_THROTTLE, (MAX_PREFETCH / max_cluster) - 1);
+                       }
+               }
+       }
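
For example, assuming the 2 MB MAX_PREFETCH computed above and a 1 MB max_cluster, (MAX_PREFETCH / max_cluster) - 1 = 1, so only one additional async I/O may be outstanding; with 256 KB clusters the cap rises to 7, and it can never exceed VNODE_ASYNC_THROTTLE — roughly keeping the queued asynchronous data within one prefetch window.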
+       if (flags & CL_AGE)
+               io_flags |= B_AGE;
+       if (flags & (CL_PAGEIN | CL_PAGEOUT))
+               io_flags |= B_PAGEIO;
+       if (flags & CL_COMMIT)
+               io_flags |= B_COMMIT_UPL;
+       if (flags & CL_PRESERVE)
+               io_flags |= B_PHYS;
+       if (flags & CL_KEEPCACHED)
+               io_flags |= B_CACHE;
+       if (flags & CL_PASSIVE)
+               io_flags |= B_PASSIVE;
+       if (vp->v_flag & VSYSTEM)
+               io_flags |= B_META;
 
        if ((flags & CL_READ) && ((upl_offset + non_rounded_size) & PAGE_MASK) && (!(flags & CL_NOZERO))) {
                /*
@@ -338,272 +844,407 @@ cluster_io(vp, upl, upl_offset, f_offset, non_rounded_size, devblocksize, flags,
                zero_offset = upl_offset + non_rounded_size;
        }
        while (size) {
-               size_t io_size;
-               int vsize;
-               int i;
-               int pl_index;
-               int pg_resid;
-               int num_contig;
-               daddr_t lblkno;
-               daddr_t blkno;
+               daddr64_t blkno;
+               daddr64_t lblkno;
+               u_int   io_size_wanted;
 
                if (size > max_iosize)
                        io_size = max_iosize;
                else
                        io_size = size;
 
-               if (error = VOP_CMAP(vp, f_offset, io_size, &blkno, &io_size, NULL)) {
-                       if (error == EOPNOTSUPP)
-                               panic("VOP_CMAP Unimplemented");
+               io_size_wanted = io_size;
+               
+               if ((error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, (size_t *)&io_size, NULL, bmap_flags, NULL)))
                        break;
-               }
+
+               if (io_size > io_size_wanted)
+                       io_size = io_size_wanted;
+
+               if (real_bp && (real_bp->b_blkno == real_bp->b_lblkno))
+                       real_bp->b_blkno = blkno;
 
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 24)) | DBG_FUNC_NONE,
-                            (int)f_offset, (int)blkno, io_size, zero_offset, 0);
+                            (int)f_offset, (int)(blkno>>32), (int)blkno, io_size, 0);
+
+               if (io_size == 0) {
+                       /*
+                        * vnop_blockmap didn't return an error... however, it did
+                        * return an extent size of 0 which means we can't
+                        * make forward progress on this I/O... a hole in the
+                        * file would be returned as a blkno of -1 with a non-zero io_size
+                        * a real extent is returned with a blkno != -1 and a non-zero io_size
+                        */
+                       error = EINVAL;
+                       break;
+               }
+               if ( !(flags & CL_READ) && blkno == -1) {
+                       off_t   e_offset;
+                       int     pageout_flags;
 
-               if ( (!(flags & CL_READ) && (long)blkno == -1) || io_size == 0) {
+                       /*
+                        * we're writing into a 'hole'
+                        */
                        if (flags & CL_PAGEOUT) {
+                               /*
+                                * if we got here via cluster_pageout 
+                                * then just error the request and return
+                                * the 'hole' should already have been covered
+                                */
                                error = EINVAL;
                                break;
-                       };
-                       
-                       /* Try paging out the page individually before
-                          giving up entirely and dumping it (it could
-                          be mapped in a "hole" and require allocation
-                          before the I/O:
-                        */
-                        ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE_64, UPL_ABORT_FREE_ON_EMPTY);
-                        if (ubc_pushdirty_range(vp, f_offset, PAGE_SIZE_64) == 0) {
-                               error = EINVAL;
-                               break;
-                        };
-                        
-                       upl_offset += PAGE_SIZE_64;
-                       f_offset   += PAGE_SIZE_64;
-                       size       -= PAGE_SIZE_64;
-                       continue;
-               }
-               lblkno = (daddr_t)(f_offset / PAGE_SIZE_64);
-               /*
+                       }
+                       /*
+                        * we can get here if the cluster code happens to 
+                        * pick up a page that was dirtied via mmap vs
+                        * a 'write' and the page targets a 'hole'...
+                        * i.e. the writes to the cluster were sparse
+                        * and the file was being written for the first time
+                        *
+                        * we can also get here if the filesystem supports
+                        * 'holes' that are less than PAGE_SIZE.... because
+                        * we can't know if the range in the page that covers
+                        * the 'hole' has been dirtied via an mmap or not,
+                        * we have to assume the worst and try to push the
+                        * entire page to storage.
+                        *
+                        * Try paging out the page individually before
+                        * giving up entirely and dumping it (the pageout
+                        * path will ensure that the zero extent accounting
+                        * has been taken care of before we get back into cluster_io)
+                        *
+                        * go direct to vnode_pageout so that we don't have to
+                        * unbusy the page from the UPL... we used to do this
+                        * so that we could call ubc_sync_range, but that results
+                        * in a potential deadlock if someone else races us to acquire
+                        * that page and wins and in addition needs one of the pages
+                        * we're continuing to hold in the UPL
+                        */
+                       pageout_flags = UPL_MSYNC | UPL_VNODE_PAGER | UPL_NESTED_PAGEOUT;
+
+                       if ( !(flags & CL_ASYNC))
+                               pageout_flags |= UPL_IOSYNC;
+                       if ( !(flags & CL_COMMIT))
+                               pageout_flags |= UPL_NOCOMMIT;
+
+                       if (cbp_head) {
+                               buf_t last_cbp;
+
+                               /*
+                                * first we have to wait for the current outstanding I/Os
+                                * to complete... EOT hasn't been set yet on this transaction
+                                * so the pages won't be released just because all of the current
+                                * I/O linked to this transaction has completed...
+                                */
+                               cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
+
+                               /*
+                                * we've got a transaction that
+                                * includes the page we're about to push out through vnode_pageout...
+                                * find the last bp in the list which will be the one that
+                                * includes the head of this page and round its iosize down
+                                * to a page boundary...
+                                */
+                                for (last_cbp = cbp = cbp_head; cbp->b_trans_next; cbp = cbp->b_trans_next)
+                                       last_cbp = cbp;
+
+                               cbp->b_bcount &= ~PAGE_MASK;
+
+                               if (cbp->b_bcount == 0) {
+                                       /*
+                                        * this buf no longer has any I/O associated with it
+                                        */
+                                       free_io_buf(cbp);
+
+                                       if (cbp == cbp_head) {
+                                               /*
+                                                * the buf we just freed was the only buf in
+                                                * this transaction... so there's no I/O to do
+                                                */
+                                               cbp_head = NULL;
+                                       } else {
+                                               /*
+                                                * remove the buf we just freed from
+                                                * the transaction list
+                                                */
+                                               last_cbp->b_trans_next = NULL;
+                                               cbp_tail = last_cbp;
+                                       }
+                               }
+                               if (cbp_head) {
+                                       /*
+                                        * there was more to the current transaction
+                                        * than just the page we are pushing out via vnode_pageout...
+                                        * mark it as finished and complete it... we've already
+                                        * waited for the I/Os to complete above in the call to cluster_wait_IO
+                                        */
+                                       cluster_EOT(cbp_head, cbp_tail, 0);
+
+                                       cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
+
+                                       trans_count = 0;
+                               }
+                       }
+                       if (vnode_pageout(vp, upl, trunc_page(upl_offset), trunc_page_64(f_offset), PAGE_SIZE, pageout_flags, NULL) != PAGER_SUCCESS) {
+                               error = EINVAL;
+                               break;
+                       }
+                       e_offset = round_page_64(f_offset + 1);
+                       io_size = e_offset - f_offset;
+
+                       f_offset   += io_size;
+                       upl_offset += io_size;
+
+                       if (size >= io_size)
+                               size -= io_size;
+                       else
+                               size = 0;
+                       /*
+                        * keep track of how much of the original request
+                        * that we've actually completed... non_rounded_size
+                        * may go negative due to us rounding the request
+                        * to a page size multiple (i.e.  size > non_rounded_size)
+                        */
+                       non_rounded_size -= io_size;
+
+                       if (non_rounded_size <= 0) {
+                               /*
+                                * we've transferred all of the data in the original
+                                * request, but we were unable to complete the tail
+                                * of the last page because the file didn't have
+                                * an allocation to back that portion... this is ok.
+                                */
+                               size = 0;
+                       }
+                       continue;
+               }
+               lblkno = (daddr64_t)(f_offset / PAGE_SIZE_64);
+               /*
                 * we have now figured out how much I/O we can do - this is in 'io_size'
-                * pl_index represents the first page in the 'upl' that the I/O will occur for
                 * pg_offset is the starting point in the first page for the I/O
                 * pg_count is the number of full and partial pages that 'io_size' encompasses
                 */
-               pl_index  = upl_offset / PAGE_SIZE; 
                pg_offset = upl_offset & PAGE_MASK;
-               pg_count  = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE;
 
                if (flags & CL_DEV_MEMORY) {
-                       /*
-                        * currently, can't deal with reading 'holes' in file
-                        */
-                       if ((long)blkno == -1) {
-                               error = EINVAL;
-                               break;
-                       }
                        /*
                         * treat physical requests as one 'giant' page
                         */
                        pg_count = 1;
-               }
-               if ((flags & CL_READ) && (long)blkno == -1) {
+               } else
+                       pg_count  = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE;
+
+               if ((flags & CL_READ) && blkno == -1) {
+                       vm_offset_t  commit_offset;
                        int bytes_to_zero;
+                       int complete_transaction_now = 0;
 
                        /*
                         * if we're reading and blkno == -1, then we've got a
                         * 'hole' in the file that we need to deal with by zeroing
                         * out the affected area in the upl
                         */
-                       if (zero_offset && io_size == size) {
+                       if (io_size >= (u_int)non_rounded_size) {
                                /*
                                 * if this upl contains the EOF and it is not a multiple of PAGE_SIZE
                                 * than 'zero_offset' will be non-zero
-                                * if the 'hole' returned by VOP_CMAP extends all the way to the eof
+                                * if the 'hole' returned by vnop_blockmap extends all the way to the eof
                                 * (indicated by the io_size finishing off the I/O request for this UPL)
                                 * than we're not going to issue an I/O for the
                                 * last page in this upl... we need to zero both the hole and the tail
                                 * of the page beyond the EOF, since the delayed zero-fill won't kick in 
                                 */
-                               bytes_to_zero = (((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset;
+                               bytes_to_zero = non_rounded_size;
+                               if (!(flags & CL_NOZERO))
+                                       bytes_to_zero = (((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset;
 
                                zero_offset = 0;
                        } else
                                bytes_to_zero = io_size;
 
-                       cluster_zero(upl, upl_offset, bytes_to_zero, real_bp);
+                       pg_count = 0;
+
+                       cluster_zero(upl, upl_offset, bytes_to_zero, real_bp);
                          
-                       if (cbp_head)
+                       if (cbp_head) {
+                               int     pg_resid;
+
                                /*
                                 * if there is a current I/O chain pending
                                 * then the first page of the group we just zero'd
                                 * will be handled by the I/O completion if the zero
                                 * fill started in the middle of the page
                                 */
-                               pg_count = (io_size - pg_offset) / PAGE_SIZE;
-                       else {
-                               /*
-                                * no pending I/O to pick up that first page
-                                * so, we have to make sure it gets committed
-                                * here.
-                                * set the pg_offset to 0 so that the upl_commit_range
-                                * starts with this page
-                                */
-                               pg_count = (io_size + pg_offset) / PAGE_SIZE;
-                               pg_offset = 0;
-                       }
-                       if (io_size == size && ((upl_offset + io_size) & PAGE_MASK))
+                               commit_offset = (upl_offset + (PAGE_SIZE - 1)) & ~PAGE_MASK;
+
+                               pg_resid = commit_offset - upl_offset;
+                                       
+                               if (bytes_to_zero >= pg_resid) {
+                                       /*
+                                        * the last page of the current I/O 
+                                        * has been completed...
+                                        * compute the number of fully zero'd 
+                                        * pages that are beyond it
+                                        * plus the last page if its partial
+                                        * and we have no more I/O to issue...
+                                        * otherwise a partial page is left
+                                        * to begin the next I/O
+                                        */
+                                       if ((int)io_size >= non_rounded_size)
+                                               pg_count = (bytes_to_zero - pg_resid + (PAGE_SIZE - 1)) / PAGE_SIZE;
+                                       else
+                                               pg_count = (bytes_to_zero - pg_resid) / PAGE_SIZE;
+                                       
+                                       complete_transaction_now = 1;
+                               }
+                       } else {
                                /*
-                                * if we're done with the request for this UPL
-                                * then we have to make sure to commit the last page
-                                * even if we only partially zero-filled it
+                                * no pending I/O to deal with
+                                * so, commit all of the fully zero'd pages
+                                * plus the last page if its partial
+                                * and we have no more I/O to issue...
+                                * otherwise a partial page is left
+                                * to begin the next I/O
                                 */
-                               pg_count++;
-
-                       if (pg_count) {
-                               if (pg_offset)
-                                       pg_resid = PAGE_SIZE - pg_offset;
+                               if ((int)io_size >= non_rounded_size)
+                                       pg_count = (pg_offset + bytes_to_zero + (PAGE_SIZE - 1)) / PAGE_SIZE;
                                else
-                                       pg_resid = 0;
+                                       pg_count = (pg_offset + bytes_to_zero) / PAGE_SIZE;
 
-                               if (flags & CL_COMMIT)
-                                       ubc_upl_commit_range(upl,
-                                                       (upl_offset + pg_resid) & ~PAGE_MASK, 
-                                                       pg_count * PAGE_SIZE,
-                                                       UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
+                               commit_offset = upl_offset & ~PAGE_MASK;
+                       }
+                       if ( (flags & CL_COMMIT) && pg_count) {
+                               ubc_upl_commit_range(upl, commit_offset, pg_count * PAGE_SIZE,
+                                                    UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
                        }
                        upl_offset += io_size;
                        f_offset   += io_size;
                        size       -= io_size;
 
-                       if (cbp_head && pg_count) 
-                               goto start_io;
-                       continue;
+                       /*
+                        * keep track of how much of the original request
+                        * we've actually completed... non_rounded_size
+                        * may go negative due to us rounding the request
+                        * to a page size multiple (i.e.  size > non_rounded_size)
+                        */
+                       non_rounded_size -= io_size;
 
-               } else if (real_bp && (real_bp->b_blkno == real_bp->b_lblkno)) {
-                       real_bp->b_blkno = blkno;
-               }
+                       if (non_rounded_size <= 0) {
+                               /*
+                                * we've transferred all of the data in the original
+                                * request, but we were unable to complete the tail
+                                * of the last page because the file didn't have
+                                * an allocation to back that portion... this is ok.
+                                */
+                               size = 0;
+                       }
+                       if (cbp_head && (complete_transaction_now || size == 0))  {
+                               cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
+
+                               cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0);
 
-               if (pg_count > 1) {
-                       if (pg_count > max_vectors) {
-                               io_size -= (pg_count - max_vectors) * PAGE_SIZE;
+                               cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
 
-                               if (io_size < 0) {
-                                       io_size = PAGE_SIZE - pg_offset;
-                                       pg_count = 1;
-                               } else
-                                       pg_count = max_vectors;
+                               trans_count = 0;
                        }
-                       /* 
-                        * we need to allocate space for the vector list
+                       continue;
+               }
+               if (pg_count > max_vectors) {
+                       if (((pg_count - max_vectors) * PAGE_SIZE) > io_size) {
+                               io_size = PAGE_SIZE - pg_offset;
+                               pg_count = 1;
+                       } else {
+                               io_size -= (pg_count - max_vectors) * PAGE_SIZE;
+                               pg_count = max_vectors;
+                       }
+               }
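                /*
                 * Illustrative example (numbers assumed, not taken from this
                 * change): with max_vectors == 8, an I/O spanning 10 pages has
                 * its tail trimmed by 2 * PAGE_SIZE and pg_count drops to 8;
                 * if trimming the overflow would consume the whole io_size,
                 * we fall back to a single page starting at pg_offset instead.
                 */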
+               /*
+                * If the transaction is going to reach the maximum number of
+                * desired elements, truncate the i/o to the nearest page so
+                * that the actual i/o is initiated after this buffer is
+                * created and added to the i/o chain.
+                *
+                * I/O directed to physically contiguous memory 
+                * doesn't have a requirement to make sure we 'fill' a page
+                */
+               if ( !(flags & CL_DEV_MEMORY) && trans_count >= max_trans_count &&
+                               ((upl_offset + io_size) & PAGE_MASK)) {
+                       vm_offset_t aligned_ofs;
+
+                       aligned_ofs = (upl_offset + io_size) & ~PAGE_MASK;
+                       /*
+                        * If the io_size does not actually finish off even a
+                        * single page we have to keep adding buffers to the
+                        * transaction despite having reached the desired limit.
+                        *
+                        * Eventually we get here with the page being finished
+                        * off (and exceeded) and then we truncate the size of
+                        * this i/o request so that it is page aligned so that
+                        * we can finally issue the i/o on the transaction.
                         */
-                       if (pg_count > 1) {
-                               iovp = (struct iovec *)_MALLOC(sizeof(struct iovec) * pg_count,
-                                                              M_SEGMENT, M_NOWAIT);
-                       
-                               if (iovp == (struct iovec *) 0) {
-                                       /*
-                                        * if the allocation fails, then throttle down to a single page
-                                        */
-                                       io_size = PAGE_SIZE - pg_offset;
-                                       pg_count = 1;
-                               }
+                       if (aligned_ofs > upl_offset) {
+                               io_size = aligned_ofs - upl_offset;
+                               pg_count--;
                        }
                }
 
-               /* Throttle the speculative IO */
-               if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT))
+               if ( !(mp->mnt_kern_flag & MNTK_VIRTUALDEV))
+                       /*
+                        * if we're not targeting a virtual device i.e. a disk image
+                        * it's safe to dip into the reserve pool since real devices
+                        * can complete this I/O request without requiring additional
+                        * bufs from the alloc_io_buf pool
+                        */
+                       priv = 1;
+               else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT))
+                       /*
+                        * Throttle the speculative IO
+                        */
                        priv = 0;
                else
                        priv = 1;
 
                cbp = alloc_io_buf(vp, priv);
 
-               if (pg_count == 1)
-                       /*
-                        * we use the io vector that's reserved in the buffer header
-                        * this insures we can always issue an I/O even in a low memory
-                        * condition that prevents the _MALLOC from succeeding... this
-                        * is necessary to prevent deadlocks with the pager
-                        */
-                       iovp = (struct iovec *)(&cbp->b_vects[0]);
-
-               cbp->b_vectorlist  = (void *)iovp;
-               cbp->b_vectorcount = pg_count;
-
-               if (flags & CL_DEV_MEMORY) {
-
-                       iovp->iov_len  = io_size;
-                       iovp->iov_base = (caddr_t)upl_phys_page(pl, 0);
-
-                       if (iovp->iov_base == (caddr_t) 0) {
-                               free_io_buf(cbp);
-                               error = EINVAL;
-                       } else
-                               iovp->iov_base += upl_offset;
-               } else {
-
-                 for (i = 0, vsize = io_size; i < pg_count; i++, iovp++) {
-                       int     psize;
-
-                       psize = PAGE_SIZE - pg_offset;
-
-                       if (psize > vsize)
-                               psize = vsize;
-
-                       iovp->iov_len  = psize;
-                       iovp->iov_base = (caddr_t)upl_phys_page(pl, pl_index + i);
-
-                       if (iovp->iov_base == (caddr_t) 0) {
-                               if (pg_count > 1)
-                                       _FREE(cbp->b_vectorlist, M_SEGMENT);
-                               free_io_buf(cbp);
-
-                               error = EINVAL;
-                               break;
-                       }
-                       iovp->iov_base += pg_offset;
-                       pg_offset = 0;
+               if (flags & CL_PAGEOUT) {
+                       u_int i;
 
-                       if (flags & CL_PAGEOUT) {
-                               int         s;
-                               struct buf *bp;
-
-                               s = splbio();
-                               if (bp = incore(vp, lblkno + i)) {
-                                       if (!ISSET(bp->b_flags, B_BUSY)) {
-                                               bremfree(bp);
-                                               SET(bp->b_flags, (B_BUSY | B_INVAL));
-                                               splx(s);
-                                               brelse(bp);
-                                       } else
-                                               panic("BUSY bp found in cluster_io");
-                               }
-                               splx(s);
+                       for (i = 0; i < pg_count; i++) {
+                               if (buf_invalblkno(vp, lblkno + i, 0) == EBUSY)
+                                       panic("BUSY bp found in cluster_io");
                        }
-                       vsize -= psize;
-                   }
                }
-               if (error)
-                       break;
-
-               if (flags & CL_ASYNC)
-                       cbp->b_iodone = (void *)cluster_iodone;
+               if (flags & CL_ASYNC) {
+                       if (buf_setcallback(cbp, (void *)cluster_iodone, callback_arg))
+                               panic("buf_setcallback failed\n");
+               }
+               cbp->b_cliodone = (void *)callback;
                cbp->b_flags |= io_flags;
 
                cbp->b_lblkno = lblkno;
                cbp->b_blkno  = blkno;
                cbp->b_bcount = io_size;
-               cbp->b_pagelist  = upl;
-               cbp->b_uploffset = upl_offset;
-               cbp->b_trans_next = (struct buf *)0;
 
-               if (flags & CL_READ)
+               if (buf_setupl(cbp, upl, upl_offset))
+                       panic("buf_setupl failed\n");
+
+               cbp->b_trans_next = (buf_t)NULL;
+
+               if ((cbp->b_iostate = (void *)iostate))
+                       /*
+                        * caller wants to track the state of this
+                        * io... bump the amount issued against this stream
+                        */
+                       iostate->io_issued += io_size;
+
+               if (flags & CL_READ) {
                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 26)) | DBG_FUNC_NONE,
-                                    cbp->b_lblkno, cbp->b_blkno, upl_offset, io_size, 0);
-               else
+                                    (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
+               }
+               else {
                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 27)) | DBG_FUNC_NONE,
-                                    cbp->b_lblkno, cbp->b_blkno, upl_offset, io_size, 0);
+                                    (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
+               }
 
                if (cbp_head) {
                        cbp_tail->b_trans_next = cbp;
@@ -611,139 +1252,173 @@ cluster_io(vp, upl, upl_offset, f_offset, non_rounded_size, devblocksize, flags,
                } else {
                        cbp_head = cbp;
                        cbp_tail = cbp;
+
+                       if ( (cbp_head->b_real_bp = real_bp) ) {
+                               cbp_head->b_flags |= B_NEED_IODONE;
+                               real_bp = (buf_t)NULL;
+                       }
                }
-               (struct buf *)(cbp->b_trans_head) = cbp_head;
-               buf_count++;
+               *(buf_t *)(&cbp->b_trans_head) = cbp_head;
+
+               trans_count++;
 
                upl_offset += io_size;
                f_offset   += io_size;
                size       -= io_size;
+               /*
+                * keep track of how much of the original request
+                * we've actually completed... non_rounded_size
+                * may go negative due to us rounding the request
+                * to a page size multiple (i.e.  size > non_rounded_size)
+                */
+               non_rounded_size -= io_size;
 
-               if ( (!(upl_offset & PAGE_MASK) && !(flags & CL_DEV_MEMORY) && ((flags & CL_ASYNC) || buf_count > 8)) || size == 0) {
+               if (non_rounded_size <= 0) {
+                       /*
+                        * we've transferred all of the data in the original
+                        * request, but we were unable to complete the tail
+                        * of the last page because the file didn't have
+                        * an allocation to back that portion... this is ok.
+                        */
+                       size = 0;
+               }
+               if (size == 0) {
+                       /*
+                        * we have no more I/O to issue, so go
+                        * finish the final transaction
+                        */
+                       need_EOT = TRUE;
+               } else if ( ((flags & CL_DEV_MEMORY) || (upl_offset & PAGE_MASK) == 0) &&
+                           ((flags & CL_ASYNC) || trans_count > max_trans_count) ) {
                        /*
-                        * if we have no more I/O to issue or
+                        * I/O directed to physically contiguous memory...
+                        * which doesn't have a requirement to make sure we 'fill' a page
+                        * or... 
                         * the current I/O we've prepared fully
                         * completes the last page in this request
-                        * and it's either an ASYNC request or 
+                        * and ...
+                        * it's either an ASYNC request or 
                         * we've already accumulated more than 8 I/O's into
-                        * this transaction and it's not an I/O directed to 
-                        * special DEVICE memory
-                        * then go ahead and issue the I/O
-                        */
-start_io:              
-                       if (flags & CL_COMMIT)
-                               cbp_head->b_flags |= B_COMMIT_UPL;
-                       if (flags & CL_PAGEOUT)
-                               cbp_head->b_flags |= B_PAGEOUT;
-                       if (flags & CL_PAGEIN)
-                               cbp_head->b_flags |= B_PGIN;
-
-                       if (real_bp) {
-                               cbp_head->b_flags |= B_NEED_IODONE;
-                               cbp_head->b_real_bp = real_bp;
-                       } else
-                               cbp_head->b_real_bp = (struct buf *)NULL;
-
-                       if (size == 0) {
-                               /*
-                                * we're about to issue the last I/O for this upl
-                                * if this was a read to the eof and the eof doesn't
-                                * finish on a page boundary, than we need to zero-fill
-                                * the rest of the page....
-                                */
-                               cbp_head->b_validend = zero_offset;
-                       } else
-                               cbp_head->b_validend = 0;
-                         
-                       for (cbp = cbp_head; cbp;) {
-                               struct buf * cbp_next;
+                        * this transaction so mark it as complete so that
+                        * it can finish asynchronously or via the cluster_complete_transaction
+                        * below if the request is synchronous
+                        */
+                       need_EOT = TRUE;
+               }
+               if (need_EOT == TRUE)
+                       cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0);
 
-                               if (io_flags & B_WRITEINPROG)
-                                       cbp->b_vp->v_numoutput++;
+               if (flags & CL_THROTTLE)
+                       (void)vnode_waitforwrites(vp, async_throttle, 0, 0, "cluster_io");
 
-                               cbp_next = cbp->b_trans_next;
+               if ( !(io_flags & B_READ))
+                       vnode_startwrite(vp);
                                
-                               (void) VOP_STRATEGY(cbp);
-                               cbp = cbp_next;
-                       }
-                       if ( !(flags & CL_ASYNC)) {
-                               for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next)
-                                       biowait(cbp);
+               (void) VNOP_STRATEGY(cbp);
 
-                               if (error = cluster_iodone(cbp_head)) {
-                                       if ((flags & CL_PAGEOUT) && (error == ENXIO))
-                                               retval = 0;     /* drop the error */
-                                       else
-                                               retval = error;
-                                       error  = 0;
-                               }
-                       }
-                       cbp_head = (struct buf *)0;
-                       cbp_tail = (struct buf *)0;
+               if (need_EOT == TRUE) {
+                       if ( !(flags & CL_ASYNC))
+                               cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 1);
 
-                       buf_count = 0;
+                       need_EOT = FALSE;
+                       trans_count = 0;
+                       cbp_head = NULL;
                }
-       }
+        }
        if (error) {
                int abort_size;
 
-               for (cbp = cbp_head; cbp;) {
-                       struct buf * cbp_next;
-                       if (cbp->b_vectorcount > 1)
-                               _FREE(cbp->b_vectorlist, M_SEGMENT);
-                       upl_offset -= cbp->b_bcount;
-                       size       += cbp->b_bcount;
+               io_size = 0;
+               
+               if (cbp_head) {
+                        /*
+                         * first wait until all of the outstanding I/O
+                         * for this partial transaction has completed
+                         */
+                       cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
+
+                       /*
+                        * Rewind the upl offset to the beginning of the
+                        * transaction.
+                        */
+                       upl_offset = cbp_head->b_uploffset;
+
+                       for (cbp = cbp_head; cbp;) {
+                               buf_t   cbp_next;
+        
+                               size       += cbp->b_bcount;
+                               io_size    += cbp->b_bcount;
 
-                       cbp_next = cbp->b_trans_next;
-                       free_io_buf(cbp);
-                       cbp = cbp_next;
+                               cbp_next = cbp->b_trans_next;
+                               free_io_buf(cbp);
+                               cbp = cbp_next;
+                       }
                }
-               pg_offset  = upl_offset & PAGE_MASK;
-               abort_size = ((size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE) * PAGE_SIZE;
+               if (iostate) {
+                       int need_wakeup = 0;
 
-               if (flags & CL_COMMIT) {
-                       int upl_abort_code;
+                       /*
+                        * update the error condition for this stream
+                        * since we never really issued the io
+                        * just go ahead and adjust it back
+                        */
+                       lck_mtx_lock_spin(cl_mtxp);
 
-                       if ((flags & CL_PAGEOUT) && (error != ENXIO)) /* transient error */
-                               upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
-                       else if (flags & CL_PAGEIN)
-                           upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
-                       else
-                               upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
+                       if (iostate->io_error == 0)
+                               iostate->io_error = error;
+                       iostate->io_issued -= io_size;
 
-                       ubc_upl_abort_range(upl, upl_offset - pg_offset, abort_size,
-                                               upl_abort_code);
+                       if (iostate->io_wanted) {
+                               /*
+                                * someone is waiting for the state of
+                                * this io stream to change
+                                */
+                               iostate->io_wanted = 0;
+                               need_wakeup = 1;
+                       }
+                       lck_mtx_unlock(cl_mtxp);
 
-                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE,
-                                    (int)upl, upl_offset - pg_offset, abort_size, error, 0);
+                       if (need_wakeup)
+                               wakeup((caddr_t)&iostate->io_wanted);
                }
-               if (real_bp) {
-                       real_bp->b_flags |= B_ERROR;
-                       real_bp->b_error  = error;
+               if (flags & CL_COMMIT) {
+                       int     upl_flags;
 
-                       biodone(real_bp);
+                       pg_offset  = upl_offset & PAGE_MASK;
+                       abort_size = (upl_end_offset - upl_offset + PAGE_MASK) & ~PAGE_MASK;
+                       
+                       upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, abort_size, error, io_flags);
+                       
+                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE,
+                                    (int)upl, upl_offset - pg_offset, abort_size, (error << 24) | upl_flags, 0);
                }
                if (retval == 0)
                        retval = error;
+       } else if (cbp_head)
+                       panic("%s(): cbp_head is not NULL.\n", __FUNCTION__);
+
+       if (real_bp) {
+               /*
+                * can get here if we either encountered an error
+                * or we completely zero-filled the request and
+                * no I/O was issued
+                */
+               if (error) {
+                       real_bp->b_flags |= B_ERROR;
+                       real_bp->b_error = error;
+               }
+               buf_biodone(real_bp);
        }
-       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_END,
-                    (int)f_offset, size, upl_offset, retval, 0);
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_END, (int)f_offset, size, upl_offset, retval, 0);
 
        return (retval);
 }
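For reference, cluster_io only adjusts the caller-supplied struct clios on this error path; draining the stream is left to the caller. A minimal sketch of that convention, assuming the msleep/wakeup-on-io_wanted pattern used with cl_mtxp elsewhere in this file (the "clios_drain" wmesg is made up; io_issued, io_completed, io_error and io_wanted are the fields used above):

	lck_mtx_lock(cl_mtxp);
	while (iostate.io_issued != iostate.io_completed) {
		/* ask the completion path to wake us when the counts change */
		iostate.io_wanted = 1;
		msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "clios_drain", NULL);
	}
	lck_mtx_unlock(cl_mtxp);

	if (iostate.io_error)
		error = iostate.io_error;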
 
 
 static int
-cluster_rd_prefetch(vp, f_offset, size, filesize, devblocksize)
-       struct vnode *vp;
-       off_t         f_offset;
-       u_int         size;
-       off_t         filesize;
-       int           devblocksize;
+cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
 {
-       int           pages_to_fetch;
-       int           skipped_pages;
+       int           pages_in_prefetch;
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_START,
                     (int)f_offset, size, (int)filesize, 0, 0);
@@ -753,113 +1428,128 @@ cluster_rd_prefetch(vp, f_offset, size, filesize, devblocksize)
                             (int)f_offset, 0, 0, 0, 0);
                return(0);
        }
-       if (size > (MAX_UPL_TRANSFER * PAGE_SIZE))
-               size = MAX_UPL_TRANSFER * PAGE_SIZE;
-       else
-               size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
-
         if ((off_t)size > (filesize - f_offset))
                 size = filesize - f_offset;
-       
-       pages_to_fetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
+       pages_in_prefetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
 
-       for (skipped_pages = 0; skipped_pages < pages_to_fetch; skipped_pages++) {
-               if (ubc_page_op(vp, f_offset, 0, 0, 0) != KERN_SUCCESS)
-                       break;
-               f_offset += PAGE_SIZE;
-               size     -= PAGE_SIZE;
-       }
-       if (skipped_pages < pages_to_fetch)
-               advisory_read(vp, filesize, f_offset, size, devblocksize);
+       advisory_read_ext(vp, filesize, f_offset, size, callback, callback_arg, bflag);
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
-                    (int)f_offset + (pages_to_fetch * PAGE_SIZE), skipped_pages, 0, 1, 0);
+                    (int)f_offset + size, pages_in_prefetch, 0, 1, 0);
 
-       return (pages_to_fetch);
+       return (pages_in_prefetch);
 }
 
 
 
 static void
-cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize)
-       struct vnode *vp;
-       daddr_t       b_lblkno;
-       daddr_t       e_lblkno;
-       off_t         filesize;
-       int           devblocksize;
+cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *rap, int (*callback)(buf_t, void *), void *callback_arg,
+                  int bflag)
 {
-       daddr_t       r_lblkno;
-       off_t         f_offset;
-       int           size_of_prefetch;
-       int           max_pages;
+       daddr64_t       r_addr;
+       off_t           f_offset;
+       int             size_of_prefetch;
+
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START,
-                    b_lblkno, e_lblkno, vp->v_lastr, 0, 0);
+                    (int)extent->b_addr, (int)extent->e_addr, (int)rap->cl_lastr, 0, 0);
 
-       if (b_lblkno == vp->v_lastr && b_lblkno == e_lblkno) {
+       if (extent->b_addr == rap->cl_lastr && extent->b_addr == extent->e_addr) {
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
-                            vp->v_ralen, vp->v_maxra, vp->v_lastr, 0, 0);
+                            rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 0, 0);
                return;
        }
-
-       if (vp->v_lastr == -1 || (b_lblkno != vp->v_lastr && b_lblkno != (vp->v_lastr + 1) &&
-                                (b_lblkno != (vp->v_maxra + 1) || vp->v_ralen == 0))) {
-               vp->v_ralen = 0;
-               vp->v_maxra = 0;
+       if (rap->cl_lastr == -1 || (extent->b_addr != rap->cl_lastr && extent->b_addr != (rap->cl_lastr + 1))) {
+               rap->cl_ralen = 0;
+               rap->cl_maxra = 0;
 
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
-                            vp->v_ralen, vp->v_maxra, vp->v_lastr, 1, 0);
+                            rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 1, 0);
 
                return;
        }
-       max_pages = MAX_UPL_TRANSFER;
-
-       vp->v_ralen = vp->v_ralen ? min(max_pages, vp->v_ralen << 1) : 1;
-
-       if (((e_lblkno + 1) - b_lblkno) > vp->v_ralen)
-               vp->v_ralen = min(max_pages, (e_lblkno + 1) - b_lblkno);
-
-       if (e_lblkno < vp->v_maxra) {
-               if ((vp->v_maxra - e_lblkno) > max(max_pages / 16, 4)) {
+       if (extent->e_addr < rap->cl_maxra) {
+               if ((rap->cl_maxra - extent->e_addr) > ((MAX_PREFETCH / PAGE_SIZE) / 4)) {
 
                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
-                                    vp->v_ralen, vp->v_maxra, vp->v_lastr, 2, 0);
+                                    rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 2, 0);
                        return;
                }
        }
-       r_lblkno = max(e_lblkno, vp->v_maxra) + 1;
-       f_offset = (off_t)r_lblkno * PAGE_SIZE_64;
+       r_addr = max(extent->e_addr, rap->cl_maxra) + 1;
+       f_offset = (off_t)(r_addr * PAGE_SIZE_64);
+
+        size_of_prefetch = 0;
 
+       ubc_range_op(vp, f_offset, f_offset + PAGE_SIZE_64, UPL_ROP_PRESENT, &size_of_prefetch);
+
+       if (size_of_prefetch) {
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
+                            rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 3, 0);
+               return;
+       }
        if (f_offset < filesize) {
-               size_of_prefetch = cluster_rd_prefetch(vp, f_offset, vp->v_ralen * PAGE_SIZE, filesize, devblocksize);
+               daddr64_t read_size;
+
+               rap->cl_ralen = rap->cl_ralen ? min(MAX_PREFETCH / PAGE_SIZE, rap->cl_ralen << 1) : 1;
+
+               read_size = (extent->e_addr + 1) - extent->b_addr;
+
+               if (read_size > rap->cl_ralen) {
+                       if (read_size > MAX_PREFETCH / PAGE_SIZE)
+                               rap->cl_ralen = MAX_PREFETCH / PAGE_SIZE;
+                       else
+                               rap->cl_ralen = read_size;
+               }
+               size_of_prefetch = cluster_read_prefetch(vp, f_offset, rap->cl_ralen * PAGE_SIZE, filesize, callback, callback_arg, bflag);
 
                if (size_of_prefetch)
-                       vp->v_maxra = (r_lblkno + size_of_prefetch) - 1;
+                       rap->cl_maxra = (r_addr + size_of_prefetch) - 1;
        }
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
-                    vp->v_ralen, vp->v_maxra, vp->v_lastr, 3, 0);
+                    rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 4, 0);
 }
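For reference, the read-ahead window sizing above reduces to a few lines; a minimal sketch using the same names (MAX_PREFETCH's value is not visible in this hunk, so the page count it yields is only a symbolic bound here):

	u_int     max_ra = MAX_PREFETCH / PAGE_SIZE;              /* upper bound, in pages */
	daddr64_t req    = (extent->e_addr + 1) - extent->b_addr; /* pages in the triggering read */

	/* grow geometrically: 1, 2, 4, ... up to max_ra */
	rap->cl_ralen = rap->cl_ralen ? min(max_ra, rap->cl_ralen << 1) : 1;

	/* but never read ahead less than the request that triggered the growth */
	if (req > rap->cl_ralen)
		rap->cl_ralen = (req > max_ra) ? max_ra : req;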
 
+
+int
+cluster_pageout(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset,
+               int size, off_t filesize, int flags)
+{
+        return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
+
+}
+
+
 int
-cluster_pageout(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flags)
-       struct vnode *vp;
-       upl_t         upl;
-       vm_offset_t   upl_offset;
-       off_t         f_offset;
-       int           size;
-       off_t         filesize;
-       int           devblocksize;
-       int           flags;
+cluster_pageout_ext(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset,
+               int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
 {
        int           io_size;
-       int           pg_size;
+       int           rounded_size;
         off_t         max_size;
-       int local_flags = CL_PAGEOUT;
+       int           local_flags;
+
+       if (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV)
+               /*
+                * if we know we're issuing this I/O to a virtual device (i.e. disk image)
+                * then we don't want to enforce this throttle... if we do, we can 
+                * potentially deadlock since we're stalling the pageout thread at a time
+                * when the disk image might need additional memory (which won't be available
+                * if the pageout thread can't run)... instead we'll just depend on the throttle
+                * that the pageout thread now has in place to deal with external files
+                */
+               local_flags = CL_PAGEOUT;
+       else
+               local_flags = CL_PAGEOUT | CL_THROTTLE;
 
        if ((flags & UPL_IOSYNC) == 0) 
                local_flags |= CL_ASYNC;
        if ((flags & UPL_NOCOMMIT) == 0) 
                local_flags |= CL_COMMIT;
+       if ((flags & UPL_KEEPCACHED))
+               local_flags |= CL_KEEPCACHED;
+       if (flags & IO_PASSIVE) 
+               local_flags |= CL_PASSIVE;
 
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 52)) | DBG_FUNC_NONE,
@@ -897,32 +1587,29 @@ cluster_pageout(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, fla
        else
                io_size = max_size;
 
-       pg_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
+       rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
 
-       if (size > pg_size) {
+       if (size > rounded_size) {
                if (local_flags & CL_COMMIT)
-                       ubc_upl_abort_range(upl, upl_offset + pg_size, size - pg_size,
+                       ubc_upl_abort_range(upl, upl_offset + rounded_size, size - rounded_size,
                                        UPL_ABORT_FREE_ON_EMPTY);
        }
-       while (vp->v_numoutput >= ASYNC_THROTTLE) {
-               vp->v_flag |= VTHROTTLED;
-               tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "cluster_pageout", 0);
-       }
+       return (cluster_io(vp, upl, upl_offset, f_offset, io_size,
+                          local_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg));
+}
+
 
-       return (cluster_io(vp, upl, upl_offset, f_offset, io_size, devblocksize,
-                          local_flags, (struct buf *)0));
+int
+cluster_pagein(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset,
+              int size, off_t filesize, int flags)
+{
+        return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
 }
 
+
 int
-cluster_pagein(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flags)
-       struct vnode *vp;
-       upl_t         upl;
-       vm_offset_t   upl_offset;
-       off_t         f_offset;
-       int           size;
-       off_t         filesize;
-       int           devblocksize;
-       int           flags;
+cluster_pagein_ext(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset,
+              int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
 {
        u_int         io_size;
        int           rounded_size;
@@ -937,6 +1624,8 @@ cluster_pagein(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flag
                local_flags |= CL_ASYNC;
        if ((flags & UPL_NOCOMMIT) == 0) 
                local_flags |= CL_COMMIT;
+       if (flags & IO_PASSIVE) 
+               local_flags |= CL_PASSIVE;
 
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 56)) | DBG_FUNC_NONE,
@@ -965,532 +1654,716 @@ cluster_pagein(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flag
 
        if (size > rounded_size && (local_flags & CL_COMMIT))
                ubc_upl_abort_range(upl, upl_offset + rounded_size,
-                                   size - (upl_offset + rounded_size), UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
+                                   size - rounded_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
        
-       retval = cluster_io(vp, upl, upl_offset, f_offset, io_size, devblocksize,
-                          local_flags | CL_READ | CL_PAGEIN, (struct buf *)0);
+       retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
+                           local_flags | CL_READ | CL_PAGEIN, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
 
-       if (retval == 0) {
-               int b_lblkno;
-               int e_lblkno;
+       return (retval);
+}
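For reference, a filesystem's pager vnops typically just forward to these entry points; a minimal sketch of a hypothetical pagein handler (myfs_vnop_pagein, VTOMYFS and the filesize field are made-up names; the vnop_pagein_args fields come from <sys/vnode_if.h>):

	static int
	myfs_vnop_pagein(struct vnop_pagein_args *ap)
	{
		struct myfs_node *np = VTOMYFS(ap->a_vp);	/* hypothetical per-node state */

		/* let the cluster layer build, issue and commit the paging I/O */
		return cluster_pagein(ap->a_vp, ap->a_pl, ap->a_pl_offset,
				      ap->a_f_offset, (int)ap->a_size,
				      (off_t)np->filesize, ap->a_flags);
	}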
 
-               b_lblkno = (int)(f_offset / PAGE_SIZE_64);
-               e_lblkno = (int)
-                       ((f_offset + ((off_t)io_size - 1)) / PAGE_SIZE_64);
 
-               if (!(flags & UPL_NORDAHEAD) && !(vp->v_flag & VRAOFF) && rounded_size == PAGE_SIZE) {
-                       /*
-                        * we haven't read the last page in of the file yet
-                        * so let's try to read ahead if we're in 
-                        * a sequential access pattern
-                        */
-                       cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize);
-               }
-               vp->v_lastr = e_lblkno;
-       }
-       return (retval);
+int
+cluster_bp(buf_t bp)
+{
+       return cluster_bp_ext(bp, NULL, NULL);
 }
 
+
 int
-cluster_bp(bp)
-       struct buf *bp;
+cluster_bp_ext(buf_t bp, int (*callback)(buf_t, void *), void *callback_arg)
 {
         off_t  f_offset;
        int    flags;
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 19)) | DBG_FUNC_START,
-                    (int)bp, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
+                    (int)bp, (int)bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
 
-       if (bp->b_pagelist == (upl_t) 0)
-               panic("cluster_bp: can't handle NULL upl yet\n");
        if (bp->b_flags & B_READ)
                flags = CL_ASYNC | CL_READ;
        else
                flags = CL_ASYNC;
+       if (bp->b_flags & B_PASSIVE) 
+               flags |= CL_PASSIVE;
 
        f_offset = ubc_blktooff(bp->b_vp, bp->b_lblkno);
 
-        return (cluster_io(bp->b_vp, bp->b_pagelist, 0, f_offset, bp->b_bcount, 0, flags, bp));
+        return (cluster_io(bp->b_vp, bp->b_upl, 0, f_offset, bp->b_bcount, flags, bp, (struct clios *)NULL, callback, callback_arg));
 }
 
+
+
 int
-cluster_write(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
-       struct vnode *vp;
-       struct uio   *uio;
-       off_t         oldEOF;
-       off_t         newEOF;
-       off_t         headOff;
-       off_t         tailOff;
-       int           devblocksize;
-       int           flags;
+cluster_write(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff, int xflags)
 {
-       int           prev_resid;
-       int           clip_size;
-       off_t         max_io_size;
-       struct iovec  *iov;
-       vm_offset_t   upl_offset;
-       int           upl_size;
-       int           pages_in_pl;
-       upl_page_info_t *pl;
-       int           upl_flags;
-       upl_t         upl;
-       int           retval = 0;
+        return cluster_write_ext(vp, uio, oldEOF, newEOF, headOff, tailOff, xflags, NULL, NULL);
+}
 
 
-       if ((!uio) || (uio->uio_segflg != UIO_USERSPACE) || (!(vp->v_flag & VNOCACHE_DATA)))
-         {
-           retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
-           return(retval);
-         }
-       
-       while (uio->uio_resid && uio->uio_offset < newEOF && retval == 0)
-         {
-           /* we know we have a resid, so this is safe */
-           iov = uio->uio_iov;
-           while (iov->iov_len == 0) {
-             uio->uio_iov++;
-             uio->uio_iovcnt--;
-             iov = uio->uio_iov;
-           }
-
-            /*
-             * We check every vector target and if it is physically
-             * contiguous space, we skip the sanity checks.
-             */
-
-            upl_offset = (vm_offset_t)iov->iov_base & ~PAGE_MASK;
-            upl_size = (upl_offset + PAGE_SIZE +(PAGE_SIZE -1)) & ~PAGE_MASK;
-           pages_in_pl = 0;
-            upl_flags = UPL_QUERY_OBJECT_TYPE;
-            if ((vm_map_get_upl(current_map(),
-                               (vm_offset_t)iov->iov_base & ~PAGE_MASK,
-                               &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, 0)) != KERN_SUCCESS)
-              {
-               /*
-                * the user app must have passed in an invalid address
+int
+cluster_write_ext(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff,
+                 int xflags, int (*callback)(buf_t, void *), void *callback_arg)
+{
+        user_ssize_t   cur_resid;
+       int             retval = 0;
+       int             flags;
+        int            zflags;
+       int             bflag;
+       int             write_type = IO_COPY;
+       u_int32_t       write_length;
+
+       flags = xflags;
+
+       if (flags & IO_PASSIVE)
+           bflag = CL_PASSIVE;
+       else
+           bflag = 0;
+
+       if (vp->v_flag & VNOCACHE_DATA)
+               flags |= IO_NOCACHE;
+
+        if (uio == NULL) {
+               /*
+                * no user data...
+                * this call is being made to zero-fill some range in the file
                 */
-               return (EFAULT);
-              }              
+               retval = cluster_write_copy(vp, NULL, (u_int32_t)0, oldEOF, newEOF, headOff, tailOff, flags, callback, callback_arg);
 
-            if (upl_flags & UPL_PHYS_CONTIG)
-             {
-               /*
-                * since the interface to the IOKit below us uses physical block #'s and
-                * block counts to specify the I/O, we can't handle anything that isn't
-                * devblocksize aligned 
+               return(retval);
+       }
+        /*
+         * do a write through the cache if one of the following is true....
+         *   NOCACHE is not true and
+         *   NOCACHE is not true, or
+         * otherwise, find out if we want the direct or contig variant for
+         * the first vector in the uio request
+         */
+        if ( (flags & IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg) )
+               retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE);
+
+        if ( (flags & (IO_TAILZEROFILL | IO_HEADZEROFILL)) && write_type == IO_DIRECT)
+               /*
+                * must go through the cached variant in this case
                 */
-               if ((uio->uio_offset & (devblocksize - 1)) || (uio->uio_resid & (devblocksize - 1)))
-                   return(EINVAL);
+               write_type = IO_COPY;
 
-               if (flags & IO_HEADZEROFILL)
-                 {
-                   flags &= ~IO_HEADZEROFILL;
+       while ((cur_resid = uio_resid(uio)) && uio->uio_offset < newEOF && retval == 0) {
+         
+               switch (write_type) {
+
+               case IO_COPY:
+                       /*
+                        * make sure the uio_resid isn't too big...
+                        * internally, we want to handle all of the I/O in
+                        * chunk sizes that fit in a 32 bit int
+                        */
+                       if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) {
+                               /*
+                                * we're going to have to call cluster_write_copy
+                                * more than once...
+                                *
+                                * only the last call to cluster_write_copy should
+                                * have the IO_TAILZEROFILL flag set and only the
+                                * first call should have IO_HEADZEROFILL
+                                */
+                               zflags = flags & ~IO_TAILZEROFILL;
+                               flags &= ~IO_HEADZEROFILL;
+
+                               write_length = MAX_IO_REQUEST_SIZE;
+                       } else {
+                               /*
+                                * last call to cluster_write_copy
+                                */
+                               zflags = flags;
+                         
+                               write_length = (u_int32_t)cur_resid;
+                       }
+                       retval = cluster_write_copy(vp, uio, write_length, oldEOF, newEOF, headOff, tailOff, zflags, callback, callback_arg);
+                       break;
+
+               case IO_CONTIG:
+                       zflags = flags & ~(IO_TAILZEROFILL | IO_HEADZEROFILL);
+
+                       if (flags & IO_HEADZEROFILL) {
+                               /*
+                                * only do this once per request
+                                */
+                               flags &= ~IO_HEADZEROFILL;
+
+                               retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, uio->uio_offset,
+                                                           headOff, (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
+                               if (retval)
+                                       break;
+                       }
+                       retval = cluster_write_contig(vp, uio, newEOF, &write_type, &write_length, callback, callback_arg, bflag);
+
+                       if (retval == 0 && (flags & IO_TAILZEROFILL) && uio_resid(uio) == 0) {
+                               /*
+                                * we're done with the data from the user specified buffer(s)
+                                * and we've been requested to zero fill at the tail
+                                * treat this as an IO_HEADZEROFILL which doesn't require a uio
+                                * by rearranging the args and passing in IO_HEADZEROFILL
+                                */
+                               retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, tailOff, uio->uio_offset,
+                                                           (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
+                       }
+                       break;
+
+               case IO_DIRECT:
+                       /*
+                        * cluster_write_direct is never called with IO_TAILZEROFILL || IO_HEADZEROFILL
+                        */
+                       retval = cluster_write_direct(vp, uio, oldEOF, newEOF, &write_type, &write_length, flags, callback, callback_arg);
+                       break;
+
+               case IO_UNKNOWN:
+                       retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE);
+                       break;
+               }
+       }
+       return (retval);
+}
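For reference, the IO_COPY arm above splits a large cached write so that each cluster_write_copy call sees a 32-bit length and the zero-fill flags land only on the chunks they apply to; a minimal sketch of just that splitting policy (MAX_IO_REQUEST_SIZE is used symbolically, its value is not shown in this hunk):

	while ((cur_resid = uio_resid(uio)) && uio->uio_offset < newEOF && retval == 0) {
		u_int32_t len;
		int       zflags = flags;

		if (cur_resid > (user_ssize_t)MAX_IO_REQUEST_SIZE) {
			len     = MAX_IO_REQUEST_SIZE;
			zflags &= ~IO_TAILZEROFILL;	/* tail-fill only on the final chunk */
		} else
			len = (u_int32_t)cur_resid;	/* final chunk keeps IO_TAILZEROFILL */

		retval = cluster_write_copy(vp, uio, len, oldEOF, newEOF,
					    headOff, tailOff, zflags, callback, callback_arg);

		flags &= ~IO_HEADZEROFILL;		/* head-fill only on the first chunk */
	}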
 
-                   if (retval = cluster_write_x(vp, (struct uio *)0, 0, uio->uio_offset, headOff, 0, devblocksize, IO_HEADZEROFILL))
-                       return(retval);
-                 }
-
-               retval = cluster_phys_write(vp, uio, newEOF);
-
-               if (uio->uio_resid == 0 && (flags & IO_TAILZEROFILL))
-                 {
-                   retval = cluster_write_x(vp, (struct uio *)0, 0, tailOff, uio->uio_offset, 0, devblocksize, IO_HEADZEROFILL);
-                   return(retval);
-                 }
-             }
-           else if ((uio->uio_resid < 4 * PAGE_SIZE) || (flags & (IO_TAILZEROFILL | IO_HEADZEROFILL))) 
-             {
-               /*
-                * We set a threshhold of 4 pages to decide if the nocopy
-                * write loop is worth the trouble...
-                * we also come here if we're trying to zero the head and/or tail
-                * of a partially written page, and the user source is not a physically contiguous region
-                */
-               retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
-               return(retval);
-             }
-           else if (uio->uio_offset & PAGE_MASK_64)
-             {
-               /* Bring the file offset write up to a pagesize boundary */
-               clip_size = (PAGE_SIZE - (uio->uio_offset & PAGE_MASK_64));
-               if (uio->uio_resid < clip_size)
-                 clip_size = uio->uio_resid;
-               /* 
-                * Fake the resid going into the cluster_write_x call
-                * and restore it on the way out.
-                */
-               prev_resid = uio->uio_resid;
-               uio->uio_resid = clip_size;
-               retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
-               uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
-             }
-           else if ((int)iov->iov_base & PAGE_MASK_64)
-             {
-               clip_size = iov->iov_len;
-               prev_resid = uio->uio_resid;
-               uio->uio_resid = clip_size;
-               retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
-               uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
-             }
-           else
-             {
-               /* 
-                * If we come in here, we know the offset into
-                * the file is on a pagesize boundary
-                */
-
-               max_io_size = newEOF - uio->uio_offset;
-               clip_size = uio->uio_resid;
-               if (iov->iov_len < clip_size)
-                 clip_size = iov->iov_len;
-               if (max_io_size < clip_size)
-                 clip_size = max_io_size;
-
-               if (clip_size < PAGE_SIZE)
-                 {
-                   /*
-                    * Take care of tail end of write in this vector
-                    */
-                   prev_resid = uio->uio_resid;
-                   uio->uio_resid = clip_size;
-                   retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
-                   uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
-                 }
-               else
-                 {
-                   /* round clip_size down to a multiple of pagesize */
-                   clip_size = clip_size & ~(PAGE_MASK);
-                   prev_resid = uio->uio_resid;
-                   uio->uio_resid = clip_size;
-                   retval = cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags);
-                   if ((retval == 0) && uio->uio_resid)
-                     retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
-                   uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
-                 }
-             } /* end else */
-         } /* end while */
-       return(retval);
-}
 
 static int
-cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags)
-       struct vnode *vp;
-       struct uio   *uio;
-       off_t         newEOF;
-       int           devblocksize;
-       int           flags;
+cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
+                    int flags, int (*callback)(buf_t, void *), void *callback_arg)
 {
        upl_t            upl;
        upl_page_info_t  *pl;
-       off_t            upl_f_offset;
        vm_offset_t      upl_offset;
-       off_t            max_io_size;
+       u_int32_t        io_req_size;
+       u_int32_t        offset_in_file;
+       u_int32_t        offset_in_iovbase;
        int              io_size;
-       int              upl_size;
-       int              upl_needed_size;
-       int              pages_in_pl;
+       int              io_flag;
+       int              bflag;
+       vm_size_t        upl_size;
+       vm_size_t        upl_needed_size;
+       mach_msg_type_number_t  pages_in_pl;
        int              upl_flags;
        kern_return_t    kret;
-       struct iovec     *iov;
-       int              i;
+       mach_msg_type_number_t  i;
        int              force_data_sync;
-       int              error  = 0;
-
-       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
-                    (int)uio->uio_offset, (int)uio->uio_resid, 
-                    (int)newEOF, devblocksize, 0);
+       int              retval = 0;
+       int              first_IO = 1;
+       struct clios     iostate;
+       user_addr_t      iov_base;
+       u_int32_t        mem_alignment_mask;
+       u_int32_t        devblocksize;
+
+       if (flags & IO_PASSIVE)
+           bflag = CL_PASSIVE;
+       else
+           bflag = 0;
 
        /*
         * When we enter this routine, we know
-        *  -- the offset into the file is on a pagesize boundary
-        *  -- the resid is a page multiple
         *  -- the resid will not exceed iov_len
         */
-       cluster_try_push(vp, newEOF, 0, 1);
-
-       iov = uio->uio_iov;
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
+                    (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
+
+       iostate.io_completed = 0;
+       iostate.io_issued = 0;
+       iostate.io_error = 0;
+       iostate.io_wanted = 0;
+
+       mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
+       devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
+
+       if (devblocksize == 1) {
+               /*
+                * the AFP client advertises a devblocksize of 1
+                * however, its BLOCKMAP routine maps to physical
+                * blocks that are PAGE_SIZE in size...
+                * therefore we can't ask for I/Os that aren't page aligned
+                * or aren't multiples of PAGE_SIZE in size
+                * by setting devblocksize to PAGE_SIZE, we re-instate
+                * the old behavior we had before the mem_alignment_mask
+                * changes went in...
+                */
+               devblocksize = PAGE_SIZE;
+       }
 
-       while (uio->uio_resid && uio->uio_offset < newEOF && error == 0) {
-         io_size = uio->uio_resid;
+next_dwrite:
+       io_req_size = *write_length;
+       iov_base = uio_curriovbase(uio);
 
-          if (io_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
-            io_size = MAX_UPL_TRANSFER * PAGE_SIZE;
+       offset_in_file = (u_int32_t)uio->uio_offset & PAGE_MASK;
+       offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;
 
-         upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK_64;
-         upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
+       if (offset_in_file || offset_in_iovbase) {
+               /*
+                * one of the 2 important offsets is misaligned
+                * so fire an I/O through the cache for this entire vector
+                */
+               goto wait_for_dwrites;
+       }
+       if (iov_base & (devblocksize - 1)) {
+               /*
+                * the offset in memory must be on a device block boundary
+                * so that we can guarantee that we can generate an
+                * I/O that ends on a page boundary in cluster_io
+                */
+               goto wait_for_dwrites;
+        }
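	/*
	 * Illustrative example (values assumed, not taken from this change):
	 * with devblocksize == 512 and mnt_alignmentmask == 3, a write whose
	 * file offset is page aligned and whose user buffer sits on a 512-byte
	 * boundary stays on this direct path; a file offset of 0x1200
	 * (offset_in_file == 0x200) or a buffer at some address + 2 fails the
	 * checks above and the vector is fired through the cache instead.
	 */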
 
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
-                      (int)upl_offset, upl_needed_size, (int)iov->iov_base, io_size, 0);
+       while (io_req_size >= PAGE_SIZE && uio->uio_offset < newEOF && retval == 0) {
 
-         for (force_data_sync = 0; force_data_sync < 3; force_data_sync++)
-           {
-             pages_in_pl = 0;
-             upl_size = upl_needed_size;
-             upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
-                         UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL;
+               if (first_IO) {
+                       cluster_syncup(vp, newEOF, callback, callback_arg);
+                       first_IO = 0;
+               }
+               io_size  = io_req_size & ~PAGE_MASK;
+               iov_base = uio_curriovbase(uio);
+
+               if (io_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
+                       io_size = MAX_UPL_TRANSFER * PAGE_SIZE;
+
+               upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
+               upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
+
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
+                            (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
+
+               for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
+                       pages_in_pl = 0;
+                       upl_size = upl_needed_size;
+                       upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
+                                   UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
+
+                       kret = vm_map_get_upl(current_map(),
+                                             (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
+                                             &upl_size,
+                                             &upl, 
+                                             NULL, 
+                                             &pages_in_pl,
+                                             &upl_flags,
+                                             force_data_sync);
+
+                       if (kret != KERN_SUCCESS) {
+                               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
+                                            0, 0, 0, kret, 0);
+                               /*
+                                * failed to get pagelist
+                                *
+                                * we may have already spun some portion of this request
+                                * off as async requests... we need to wait for the I/O
+                                * to complete before returning
+                                */
+                               goto wait_for_dwrites;
+                       }
+                       pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
+                       pages_in_pl = upl_size / PAGE_SIZE;
 
-             kret = vm_map_get_upl(current_map(),
-                                   (vm_offset_t)iov->iov_base & ~PAGE_MASK,
-                                   &upl_size,
-                                       &upl, 
-                                       NULL, 
-                                       &pages_in_pl,
-                                       &upl_flags,
-                                       force_data_sync);
+                       for (i = 0; i < pages_in_pl; i++) {
+                               if (!upl_valid_page(pl, i))
+                                       break;            
+                       }
+                       if (i == pages_in_pl)
+                               break;
 
-             if (kret != KERN_SUCCESS)
-               {
-                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
-                              0, 0, 0, kret, 0);
+                       /*
+                        * didn't get all the pages back that we
+                        * needed... release this upl and try again
+                        */
+                       ubc_upl_abort(upl, 0);
+               }
+               if (force_data_sync >= 3) {
+                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
+                                    i, pages_in_pl, upl_size, kret, 0);
+                       /*
+                        * for some reason, we couldn't acquire a hold on all
+                        * the pages needed in the user's address space
+                        *
+                        * we may have already spun some portion of this request
+                        * off as async requests... we need to wait for the I/O
+                        * to complete before returning
+                        */
+                       goto wait_for_dwrites;
+               }
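The retry loop above asks vm_map_get_upl for the user's pages up to three times, escalating force_data_sync on each pass, and keeps the UPL only when every page in it came back valid. The validity scan itself is just a first-miss search, sketched here against a plain boolean array standing in for upl_valid_page (names and types are illustrative only):

#include <stdio.h>

/* returns the index of the first invalid page, or count if all are valid */
static int
first_invalid_page(const int *valid, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                if (!valid[i])
                        break;
        }
        return i;
}

int
main(void)
{
        int pl[] = { 1, 1, 0, 1 };      /* hypothetical page-validity flags */
        int pages_in_pl = 4;
        int i = first_invalid_page(pl, pages_in_pl);

        if (i == pages_in_pl)
                printf("all pages valid, keep the UPL\n");
        else
                printf("page %d invalid, abort the UPL and retry\n", i);
        return 0;
}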
 
-                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
-                              (int)uio->uio_offset, (int)uio->uio_resid, kret, 1, 0);
+               /*
+                * Consider the possibility that upl_size wasn't satisfied.
+                */
+               if (upl_size < upl_needed_size) {
+                       if (upl_size && upl_offset == 0)
+                               io_size = upl_size;
+                       else
+                               io_size = 0;
+               }
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
+                            (int)upl_offset, upl_size, (int)iov_base, io_size, 0);                    
 
-                 /* cluster_nocopy_write: failed to get pagelist */
-                 /* do not return kret here */
-                 return(0);
+               if (io_size == 0) {
+                       ubc_upl_abort(upl, 0);
+                       /*
+                        * we may have already spun some portion of this request
+                        * off as async requests... we need to wait for the I/O
+                        * to complete before returning
+                        */
+                       goto wait_for_dwrites;
                }
 
-             pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
-             pages_in_pl = upl_size / PAGE_SIZE;
+               /*
+                * Now look for pages already in the cache
+                * and throw them away.
+                * uio->uio_offset is page aligned within the file
+                * io_size is a multiple of PAGE_SIZE
+                */
+               ubc_range_op(vp, uio->uio_offset, uio->uio_offset + io_size, UPL_ROP_DUMP, NULL);
 
-             for(i=0; i < pages_in_pl; i++)
-               {
-                 if (!upl_valid_page(pl, i))
-                   break;                
-               }
+               /*
+                * we want to push out these writes asynchronously so that we can overlap
+                * the preparation of the next I/O
+                * if there are already too many outstanding writes
+                * wait until some complete before issuing the next
+                */
+               lck_mtx_lock(cl_mtxp);
 
-             if (i == pages_in_pl)
-               break;
+               while ((iostate.io_issued - iostate.io_completed) > (2 * MAX_UPL_TRANSFER * PAGE_SIZE)) {
+                       iostate.io_wanted = 1;
+                       msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_direct", NULL);
+               }       
+               lck_mtx_unlock(cl_mtxp);
+
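The lock/msleep loop just above is a producer throttle: at most two full UPL transfers' worth of bytes are kept in flight, and the issuing thread sleeps on io_wanted until the completion side catches up. Below is a user-space analogue of that pattern using a pthread mutex and condition variable in place of cl_mtxp and msleep; the structure, names and window size are illustrative assumptions, not the kernel API.

#include <pthread.h>
#include <stdint.h>

#define MAX_UPL_TRANSFER  256                   /* pages per transfer, assumed */
#define PAGE_SIZE         4096
#define WINDOW            (2 * MAX_UPL_TRANSFER * PAGE_SIZE)

struct clios_like {
        pthread_mutex_t lock;                   /* stands in for cl_mtxp */
        pthread_cond_t  done;                   /* signalled by the completion path */
        uint64_t        io_issued;              /* bytes handed to the device */
        uint64_t        io_completed;           /* bytes the device has finished */
};

/* called by the issuing thread before it prepares the next chunk */
static void
throttle_writer(struct clios_like *s)
{
        pthread_mutex_lock(&s->lock);
        while ((s->io_issued - s->io_completed) > WINDOW)
                pthread_cond_wait(&s->done, &s->lock);  /* analogue of msleep on io_wanted */
        pthread_mutex_unlock(&s->lock);
}

/* called from the I/O completion path */
static void
complete_io(struct clios_like *s, uint64_t bytes)
{
        pthread_mutex_lock(&s->lock);
        s->io_completed += bytes;
        pthread_cond_broadcast(&s->done);       /* wake any throttled writer */
        pthread_mutex_unlock(&s->lock);
}

Keeping a bounded window lets preparation of the next UPL overlap the writes already in flight without allowing an arbitrarily large amount of wired memory to build up.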
+               if (iostate.io_error) {
+                       /*
+                        * one of the earlier writes we issued ran into a hard error
+                        * don't issue any more writes, cleanup the UPL
+                        * that was just created but not used, then
+                        * go wait for all writes that are part of this stream
+                        * to complete before returning the error to the caller
+                        */
+                       ubc_upl_abort(upl, 0);
+
+                       goto wait_for_dwrites;
+               }
+               io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT | CL_THROTTLE | CL_DIRECT_IO | bflag;
+
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
+                            (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);
 
-               ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
-                               UPL_ABORT_FREE_ON_EMPTY);
-           }
-
-         if (force_data_sync >= 3)
-           {
-             KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
-                          i, pages_in_pl, upl_size, kret, 0);
-
-             KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
-                          (int)uio->uio_offset, (int)uio->uio_resid, kret, 2, 0);
-             return(0);
-           }
-
-         /*
-          * Consider the possibility that upl_size wasn't satisfied.
-          */
-         if (upl_size != upl_needed_size)
-           io_size = (upl_size - (int)upl_offset) & ~PAGE_MASK;
-
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
-                      (int)upl_offset, upl_size, (int)iov->iov_base, io_size, 0);                     
-
-         if (io_size == 0)
-           {
-             ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
-                                  UPL_ABORT_FREE_ON_EMPTY);
-             KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
-                    (int)uio->uio_offset, uio->uio_resid, 0, 3, 0);
-
-             return(0);
-           }
-
-         /*
-          * Now look for pages already in the cache
-          * and throw them away.
-          */
-
-         upl_f_offset = uio->uio_offset;   /* this is page aligned in the file */
-         max_io_size = io_size;
-
-         while (max_io_size) {
-
-           /*
-            * Flag UPL_POP_DUMP says if the page is found
-            * in the page cache it must be thrown away.
-            */
-           ubc_page_op(vp, 
-                       upl_f_offset,
-                       UPL_POP_SET | UPL_POP_BUSY | UPL_POP_DUMP,
-                       0, 0);
-           max_io_size  -= PAGE_SIZE;
-           upl_f_offset += PAGE_SIZE;
-         }
-
-         /*
-          * issue a synchronous write to cluster_io
-          */
-
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
-                      (int)upl_offset, (int)uio->uio_offset, io_size, 0, 0);
-
-         error = cluster_io(vp, upl, upl_offset, uio->uio_offset,
-                            io_size, devblocksize, 0, (struct buf *)0);
-
-         if (error == 0) {
-           /*
-            * The cluster_io write completed successfully,
-            * update the uio structure.
-            */
-           iov->iov_base += io_size;
-           iov->iov_len -= io_size;
-           uio->uio_resid -= io_size;
-           uio->uio_offset += io_size;
-         }
-         /*
-          * always 'commit' the I/O via the abort primitive whether the I/O
-          * succeeded cleanly or not... this is necessary to insure that 
-          * we preserve the state of the DIRTY flag on the pages used to
-          * provide the data for the I/O... the state of this flag SHOULD
-          * NOT be changed by a write
-          */
-         ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
-                             UPL_ABORT_FREE_ON_EMPTY);
-
-
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
-                      (int)upl_offset, (int)uio->uio_offset, (int)uio->uio_resid, error, 0);
+               retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
+                                  io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
+
+               /*
+                * update the uio structure to
+                * reflect the I/O that we just issued
+                */
+               uio_update(uio, (user_size_t)io_size);
+
+               io_req_size -= io_size;
+
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
+                            (int)upl_offset, (int)uio->uio_offset, io_req_size, retval, 0);
 
        } /* end while */
 
+        if (retval == 0 && iostate.io_error == 0 && io_req_size == 0) {
+
+               retval = cluster_io_type(uio, write_type, write_length, MIN_DIRECT_WRITE_SIZE);
+
+               if (retval == 0 && *write_type == IO_DIRECT) {
+
+                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_NONE,
+                                    (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
 
+                       goto next_dwrite;
+               }
+        }
+
+wait_for_dwrites:
+       if (iostate.io_issued) {
+               /*
+                * make sure all async writes issued as part of this stream
+                * have completed before we return
+                */
+               lck_mtx_lock(cl_mtxp);
+
+               while (iostate.io_issued != iostate.io_completed) {
+                       iostate.io_wanted = 1;
+                       msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_direct", NULL);
+               }       
+               lck_mtx_unlock(cl_mtxp);
+       }
+       if (iostate.io_error)
+               retval = iostate.io_error;
+
+       if (io_req_size && retval == 0) {
+               /*
+                * we couldn't handle the tail of this request in DIRECT mode
+                * so fire it through the copy path
+                *
+                * note that flags will never have IO_HEADZEROFILL or IO_TAILZEROFILL set
+                * so we can just pass 0 in for the headOff and tailOff
+                */
+               retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg);
+
+               *write_type = IO_UNKNOWN;
+       }
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
-                    (int)uio->uio_offset, (int)uio->uio_resid, error, 4, 0);
+                    (int)uio->uio_offset, io_req_size, retval, 4, 0);
 
-       return (error);
+       return (retval);
 }
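Taken together, the main loop and the wait_for_dwrites tail handling split a request into a page-multiple body that is issued directly and a sub-page remainder that is pushed through cluster_write_copy. A small worked sketch of that split, using a hypothetical request size:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096u
#define PAGE_MASK  (PAGE_SIZE - 1)

int
main(void)
{
        uint32_t io_req_size = 10000;                           /* hypothetical: just under 2.5 pages */
        uint32_t direct_part = io_req_size & ~PAGE_MASK;        /* 8192: issued via cluster_io */
        uint32_t copy_tail   = io_req_size - direct_part;       /* 1808: handed to cluster_write_copy */

        printf("direct: %u bytes, copied tail: %u bytes\n", direct_part, copy_tail);
        return 0;
}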
 
+
 static int
-cluster_phys_write(vp, uio, newEOF)
-       struct vnode *vp;
-       struct uio   *uio;
-       off_t        newEOF;
+cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, int *write_type, u_int32_t *write_length,
+                    int (*callback)(buf_t, void *), void *callback_arg, int bflag)
 {
-       upl_t            upl;
+       upl_page_info_t *pl;
+       addr64_t         src_paddr = 0;
+       upl_t            upl[MAX_VECTS];
        vm_offset_t      upl_offset;
-       int              io_size;
-       int              upl_size;
-       int              upl_needed_size;
-       int              pages_in_pl;
+       u_int32_t        tail_size = 0;
+       u_int32_t        io_size;
+       u_int32_t        xsize;
+       vm_size_t        upl_size;
+       vm_size_t        upl_needed_size;
+       mach_msg_type_number_t  pages_in_pl;
        int              upl_flags;
        kern_return_t    kret;
-       struct iovec     *iov;
+        struct clios     iostate;
        int              error  = 0;
+       int              cur_upl = 0;
+       int              num_upl = 0;
+       int              n;
+       user_addr_t      iov_base;
+       u_int32_t        devblocksize;
+       u_int32_t        mem_alignment_mask;
 
        /*
         * When we enter this routine, we know
-        *  -- the resid will not exceed iov_len
-        *  -- the vector target address is physcially contiguous
+        *  -- the io_req_size will not exceed iov_len
+        *  -- the target address is physically contiguous
         */
-       cluster_try_push(vp, newEOF, 0, 1);
+       cluster_syncup(vp, newEOF, callback, callback_arg);
+
+       devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
+       mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
+
+        iostate.io_completed = 0;
+        iostate.io_issued = 0;
+        iostate.io_error = 0;
+        iostate.io_wanted = 0;
+
+next_cwrite:
+       io_size = *write_length;
+
+       iov_base = uio_curriovbase(uio);
 
-       iov = uio->uio_iov;
-       io_size = iov->iov_len;
-       upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK_64;
+       upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
        upl_needed_size = upl_offset + io_size;
 
        pages_in_pl = 0;
        upl_size = upl_needed_size;
        upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC | 
-                   UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL;
+                   UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
 
        kret = vm_map_get_upl(current_map(),
-                             (vm_offset_t)iov->iov_base & ~PAGE_MASK,
-                             &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, 0);
+                             (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
+                             &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);
 
-       if (kret != KERN_SUCCESS)
-         {
-           /* cluster_phys_write: failed to get pagelist */
-             /* note: return kret here */
-             return(EINVAL);
-         }
+       if (kret != KERN_SUCCESS) {
+               /*
+                * failed to get pagelist
+                */
+               error = EINVAL;
+               goto wait_for_cwrites;
+       }
+       num_upl++;
 
        /*
         * Consider the possibility that upl_size wasn't satisfied.
-        * This is a failure in the physical memory case.
         */
-       if (upl_size < upl_needed_size)
-         {
-           kernel_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
-           return(EINVAL);
-         }
+       if (upl_size < upl_needed_size) {
+               /*
+                * This is a failure in the physical memory case.
+                */
+               error = EINVAL;
+               goto wait_for_cwrites;
+       }
+       pl = ubc_upl_pageinfo(upl[cur_upl]);
 
-       /*
-        * issue a synchronous write to cluster_io
-        */
+       src_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)upl_offset;
 
-       error = cluster_io(vp, upl, upl_offset, uio->uio_offset,
-                          io_size, 0, CL_DEV_MEMORY, (struct buf *)0);
+       while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
+               u_int32_t   head_size;
 
-       if (error == 0) {
-         /*
-          * The cluster_io write completed successfully,
-          * update the uio structure and commit.
-          */
-
-         ubc_upl_commit_range(upl, 0, upl_size, UPL_COMMIT_FREE_ON_EMPTY);
-           
-         iov->iov_base += io_size;
-         iov->iov_len -= io_size;
-         uio->uio_resid -= io_size;
-         uio->uio_offset += io_size;
+               head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));
+
+               if (head_size > io_size)
+                       head_size = io_size;
+
+               error = cluster_align_phys_io(vp, uio, src_paddr, head_size, 0, callback, callback_arg);
+
+               if (error)
+                       goto wait_for_cwrites;
+
+               upl_offset += head_size;
+               src_paddr  += head_size;
+               io_size    -= head_size;
+
+               iov_base   += head_size;
        }
-       else
-         ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
+       if ((u_int32_t)iov_base & mem_alignment_mask) {
+               /*
+                * the request isn't aligned to a memory boundary
+                * that the underlying DMA engine can handle...
+                * return an error instead of going through
+                * the slow copy path since the intent of this
+                * path is direct I/O from device memory
+                */
+               error = EINVAL;
+               goto wait_for_cwrites;
+       }
+
+       tail_size = io_size & (devblocksize - 1);
+       io_size  -= tail_size;
+
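Before any device-memory I/O is issued, the code peels off a head that brings the file offset up to a device-block boundary and a tail equal to the remainder modulo the block size; both are handled by cluster_align_phys_io, while only the block-aligned middle goes through cluster_io. A sketch of that arithmetic with assumed values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t uio_offset   = 1000;   /* hypothetical file offset */
        uint32_t io_size      = 10000;  /* hypothetical request length */
        uint32_t devblocksize = 512;
        uint32_t head_size    = 0;

        if (uio_offset & (devblocksize - 1)) {
                /* bytes needed to reach the next device block boundary */
                head_size = devblocksize - (uint32_t)(uio_offset & (devblocksize - 1));
                if (head_size > io_size)
                        head_size = io_size;
        }
        io_size -= head_size;

        /* bytes left over past the last whole device block */
        uint32_t tail_size = io_size & (devblocksize - 1);
        io_size -= tail_size;

        printf("head %u, aligned middle %u, tail %u\n", head_size, io_size, tail_size);
        /* prints: head 24, aligned middle 9728, tail 248 */
        return 0;
}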
+       while (io_size && error == 0) {
+
+               if (io_size > MAX_IO_CONTIG_SIZE)
+                       xsize = MAX_IO_CONTIG_SIZE;
+               else
+                       xsize = io_size;
+               /*
+                * request asynchronously so that we can overlap
+                * the preparation of the next I/O... we'll do
+                * the commit after all the I/O has completed
+                * since it's all issued against the same UPL
+                * if there are already too many outstanding writes
+                * wait until some have completed before issuing the next
+                */
+               if (iostate.io_issued) {
+                       lck_mtx_lock(cl_mtxp);
+
+                       while ((iostate.io_issued - iostate.io_completed) > (2 * MAX_IO_CONTIG_SIZE)) {
+                               iostate.io_wanted = 1;
+                               msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_contig", NULL);
+                       }
+                       lck_mtx_unlock(cl_mtxp);
+               }
+                if (iostate.io_error) {
+                        /*
+                         * one of the earlier writes we issued ran into a hard error
+                         * don't issue any more writes...
+                         * go wait for all writes that are part of this stream
+                         * to complete before returning the error to the caller
+                         */
+                       goto wait_for_cwrites;
+               }
+               /*
+                * issue an asynchronous write to cluster_io
+                */
+               error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset,
+                                  xsize, CL_DEV_MEMORY | CL_ASYNC | bflag, (buf_t)NULL, (struct clios *)&iostate, callback, callback_arg);
+
+               if (error == 0) {
+                       /*
+                        * The cluster_io write completed successfully,
+                        * update the uio structure
+                        */
+                       uio_update(uio, (user_size_t)xsize);
+
+                       upl_offset += xsize;
+                       src_paddr  += xsize;
+                       io_size    -= xsize;
+               }
+       }
+        if (error == 0 && iostate.io_error == 0 && tail_size == 0) {
+
+               error = cluster_io_type(uio, write_type, write_length, 0);
+
+               if (error == 0 && *write_type == IO_CONTIG) {
+                       cur_upl++;
+                        goto next_cwrite;
+               }
+       } else
+               *write_type = IO_UNKNOWN;
+
+wait_for_cwrites:
+       /*
+         * make sure all async writes that are part of this stream
+         * have completed before we proceed
+         */
+        lck_mtx_lock(cl_mtxp);
+
+        while (iostate.io_issued != iostate.io_completed) {
+               iostate.io_wanted = 1;
+               msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_contig", NULL);
+        }
+        lck_mtx_unlock(cl_mtxp);
+
+        if (iostate.io_error)
+               error = iostate.io_error;
+
+       if (error == 0 && tail_size)
+               error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0, callback, callback_arg);
+
+        for (n = 0; n < num_upl; n++)
+               /*
+                * just release our hold on each physically contiguous
+                * region without changing any state
+                */
+               ubc_upl_abort(upl[n], 0);
 
        return (error);
 }
 
+
 static int
-cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
-       struct vnode *vp;
-       struct uio   *uio;
-       off_t         oldEOF;
-       off_t         newEOF;
-       off_t         headOff;
-       off_t         tailOff;
-       int           devblocksize;
-       int           flags;
+cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF, off_t headOff,
+                  off_t tailOff, int flags, int (*callback)(buf_t, void *), void *callback_arg)
 {
        upl_page_info_t *pl;
        upl_t            upl;
-       vm_offset_t      upl_offset;
-       int              upl_size;
+       vm_offset_t      upl_offset = 0;
+       vm_size_t        upl_size;
        off_t            upl_f_offset;
        int              pages_in_upl;
        int              start_offset;
        int              xfer_resid;
        int              io_size;
-       int              io_flags;
-       vm_offset_t      io_address;
        int              io_offset;
        int              bytes_to_zero;
        int              bytes_to_move;
        kern_return_t    kret;
        int              retval = 0;
-       int              uio_resid;
+       int              io_resid;
        long long        total_size;
        long long        zero_cnt;
        off_t            zero_off;
        long long        zero_cnt1;
        off_t            zero_off1;
-       daddr_t          start_blkno;
-       daddr_t          last_blkno;
+       struct cl_extent cl;
+        int              intersection;
+       struct cl_writebehind *wbp;
+       int              bflag;
+
+       if (flags & IO_PASSIVE)
+           bflag = CL_PASSIVE;
+       else
+           bflag = 0;
 
        if (uio) {
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
-                            (int)uio->uio_offset, uio->uio_resid, (int)oldEOF, (int)newEOF, 0);
+                            (int)uio->uio_offset, io_req_size, (int)oldEOF, (int)newEOF, 0);
 
-               uio_resid = uio->uio_resid;
+               io_resid = io_req_size;
        } else {
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
                             0, 0, (int)oldEOF, (int)newEOF, 0);
 
-               uio_resid = 0;
+               io_resid = 0;
        }
        zero_cnt  = 0;
        zero_cnt1 = 0;
+       zero_off  = 0;
+       zero_off1 = 0;
 
        if (flags & IO_HEADZEROFILL) {
                /*
@@ -1512,27 +2385,26 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
        }
        if (flags & IO_TAILZEROFILL) {
                if (uio) {
-                       zero_off1 = uio->uio_offset + uio->uio_resid;
+                       zero_off1 = uio->uio_offset + io_req_size;
 
                        if (zero_off1 < tailOff)
                                zero_cnt1 = tailOff - zero_off1;
                }       
        }
-       if (zero_cnt == 0 && uio == (struct uio *) 0)
-         {
-           KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
-                        retval, 0, 0, 0, 0);
-           return (0);
-         }
+       if (zero_cnt == 0 && uio == (struct uio *) 0) {
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
+                            retval, 0, 0, 0, 0);
+               return (0);
+       }
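For the IO_TAILZEROFILL case above, zero_off1 marks the first byte past the caller's data and zero_cnt1 is the gap from there out to tailOff; the head case (not shown in this hunk) fills symmetrically from headOff up to the start of the data. A sketch of the tail computation with hypothetical offsets:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        /* hypothetical values */
        int64_t uio_offset  = 8192;     /* where the user data starts in the file */
        int64_t io_req_size = 3000;     /* bytes of user data being written */
        int64_t tailOff     = 12288;    /* zero-fill out to this file offset */

        int64_t zero_off1 = uio_offset + io_req_size;   /* 11192: first byte past the data */
        int64_t zero_cnt1 = 0;

        if (zero_off1 < tailOff)
                zero_cnt1 = tailOff - zero_off1;        /* 1096 bytes of zeroes to write */

        printf("zero from %lld for %lld bytes\n",
               (long long)zero_off1, (long long)zero_cnt1);
        return 0;
}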
 
-       while ((total_size = (uio_resid + zero_cnt + zero_cnt1)) && retval == 0) {
+       while ((total_size = (io_resid + zero_cnt + zero_cnt1)) && retval == 0) {
                /*
                 * for this iteration of the loop, figure out where our starting point is
                 */
                if (zero_cnt) {
                        start_offset = (int)(zero_off & PAGE_MASK_64);
                        upl_f_offset = zero_off - start_offset;
-               } else if (uio_resid) {
+               } else if (io_resid) {
                        start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
                        upl_f_offset = uio->uio_offset - start_offset;
                } else {
@@ -1545,6 +2417,44 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                if (total_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
                        total_size = MAX_UPL_TRANSFER * PAGE_SIZE;
 
+               cl.b_addr = (daddr64_t)(upl_f_offset / PAGE_SIZE_64);
+               
+               if (uio && ((flags & (IO_SYNC | IO_HEADZEROFILL | IO_TAILZEROFILL)) == 0)) {
+                       /*
+                        * assumption... total_size <= io_resid
+                        * because IO_HEADZEROFILL and IO_TAILZEROFILL not set
+                        */
+                       if ((start_offset + total_size) > (MAX_UPL_TRANSFER * PAGE_SIZE))
+                               total_size -= start_offset;
+                       xfer_resid = total_size;
+
+                       retval = cluster_copy_ubc_data_internal(vp, uio, &xfer_resid, 1, 1);
+                       
+                       if (retval)
+                               break;
+
+                       io_resid    -= (total_size - xfer_resid);
+                       total_size   = xfer_resid;
+                       start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
+                       upl_f_offset = uio->uio_offset - start_offset;
+
+                       if (total_size == 0) {
+                               if (start_offset) {
+                                       /*
+                                        * the write did not finish on a page boundary
+                                        * which will leave upl_f_offset pointing to the
+                                        * beginning of the last page written instead of
+                                        * the page beyond it... bump it in this case
+                                        * so that the cluster code records the last page
+                                        * written as dirty
+                                        */
+                                       upl_f_offset += PAGE_SIZE_64;
+                               }
+                               upl_size = 0;
+                               
+                               goto check_cluster;
+                       }
+               }
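The fast path above copies user data straight into pages already resident in the UBC; when that copy consumes the whole transfer but ends mid-page, upl_f_offset still names the page that was just partially written, so it is bumped by one page before check_cluster so the write-behind bookkeeping records that last page as dirty. A small sketch of the adjustment, assuming the write began at the start of the file:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_64  4096LL
#define PAGE_MASK_64  (PAGE_SIZE_64 - 1)

int
main(void)
{
        int64_t uio_offset = 0x1800;    /* hypothetical: the copy finished mid-page 1 */

        int     start_offset = (int)(uio_offset & PAGE_MASK_64);       /* 0x800 */
        int64_t upl_f_offset = uio_offset - start_offset;              /* 0x1000: last page touched */

        if (start_offset)
                upl_f_offset += PAGE_SIZE_64;   /* 0x2000: one past the last dirty page */

        /* with upl_size 0, cl.e_addr becomes upl_f_offset / PAGE_SIZE = 2,
         * so pages 0 and 1 are recorded as dirty */
        printf("end offset for cluster bookkeeping: 0x%llx\n", (long long)upl_f_offset);
        return 0;
}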
                /*
                 * compute the size of the upl needed to encompass
                 * the requested write... limit each call to cluster_io
@@ -1564,20 +2474,25 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                if ((long long)io_size > total_size)
                        io_size = total_size;
 
-               start_blkno = (daddr_t)(upl_f_offset / PAGE_SIZE_64);
-               last_blkno  = start_blkno + pages_in_upl;
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0);
+                       
 
+               /*
+                * Gather the pages from the buffer cache.
+                * The UPL_WILL_MODIFY flag lets the UPL subsystem know
+                * that we intend to modify these pages.
+                */
                kret = ubc_create_upl(vp, 
-                                                       upl_f_offset,
-                                                       upl_size,
-                                                       &upl,
-                                                       &pl,
-                                                       UPL_FLAGS_NONE);
+                                     upl_f_offset,
+                                     upl_size,
+                                     &upl,
+                                     &pl,
+                                     UPL_SET_LITE | UPL_WILL_MODIFY);
                if (kret != KERN_SUCCESS)
-                       panic("cluster_write: failed to get pagelist");
+                       panic("cluster_write_copy: failed to get pagelist");
 
-               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_NONE,
-                       (int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END,
+                       (int)upl, (int)upl_f_offset, start_offset, 0, 0);
 
                if (start_offset && !upl_valid_page(pl, 0)) {
                        int   read_size;
@@ -1592,8 +2507,8 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                        if ((upl_f_offset + read_size) > newEOF)
                                read_size = newEOF - upl_f_offset;
 
-                       retval = cluster_io(vp, upl, 0, upl_f_offset, read_size, devblocksize,
-                                           CL_READ, (struct buf *)0);
+                       retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
+                                           CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
                        if (retval) {
                                /*
                                 * we had an error during the read which causes us to abort
@@ -1602,7 +2517,9 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                                 * their state and mark the failed page in error
                                 */
                                ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES);
-                               ubc_upl_abort_range(upl, 0, upl_size,  UPL_ABORT_FREE_ON_EMPTY);
+
+                               if (upl_size > PAGE_SIZE)
+                                       ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
 
                                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
                                             (int)upl, 0, 0, retval, 0);
@@ -1626,8 +2543,8 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                                if ((upl_f_offset + upl_offset + read_size) > newEOF)
                                        read_size = newEOF - (upl_f_offset + upl_offset);
 
-                               retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size, devblocksize,
-                                                   CL_READ, (struct buf *)0);
+                               retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
+                                                   CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
                                if (retval) {
                                        /*
                                         * we had an error during the read which causes us to abort
@@ -1636,7 +2553,9 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                                         * modifying their state and mark the failed page in error
                                         */
                                        ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES);
-                                       ubc_upl_abort_range(upl, 0,          upl_size,  UPL_ABORT_FREE_ON_EMPTY);
+
+                                       if (upl_size > PAGE_SIZE)
+                                               ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
 
                                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
                                                     (int)upl, 0, 0, retval, 0);
@@ -1644,8 +2563,6 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                                }
                        }
                }
-               if ((kret = ubc_upl_map(upl, &io_address)) != KERN_SUCCESS)
-                       panic("cluster_write: ubc_upl_map failed\n");
                xfer_resid = io_size;
                io_offset = start_offset;
 
@@ -1657,11 +2574,7 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                                bytes_to_zero = xfer_resid;
 
                        if ( !(flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {
-                               bzero((caddr_t)(io_address + io_offset), bytes_to_zero);
-
-                               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
-                                            (int)upl_f_offset + io_offset, bytes_to_zero,
-                                            (int)io_offset, xfer_resid, 0);
+                               cluster_zero(upl, io_offset, bytes_to_zero, NULL);
                        } else {
                                int zero_pg_index;
 
@@ -1669,19 +2582,11 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                                zero_pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64);
 
                                if ( !upl_valid_page(pl, zero_pg_index)) {
-                                       bzero((caddr_t)(io_address + io_offset), bytes_to_zero); 
-
-                                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
-                                                    (int)upl_f_offset + io_offset, bytes_to_zero,
-                                                    (int)io_offset, xfer_resid, 0);
+                                       cluster_zero(upl, io_offset, bytes_to_zero, NULL); 
 
                                } else if ((flags & (IO_NOZERODIRTY | IO_NOZEROVALID)) == IO_NOZERODIRTY &&
                                           !upl_dirty_page(pl, zero_pg_index)) {
-                                       bzero((caddr_t)(io_address + io_offset), bytes_to_zero); 
-
-                                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
-                                                    (int)upl_f_offset + io_offset, bytes_to_zero,
-                                                    (int)io_offset, xfer_resid, 0);
+                                       cluster_zero(upl, io_offset, bytes_to_zero, NULL); 
                                }
                        }
                        xfer_resid -= bytes_to_zero;
@@ -1689,25 +2594,22 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                        zero_off   += bytes_to_zero;
                        io_offset  += bytes_to_zero;
                }
-               if (xfer_resid && uio_resid) {
-                       bytes_to_move = min(uio_resid, xfer_resid);
+               if (xfer_resid && io_resid) {
+                       u_int32_t  io_requested;
 
-                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 42)) | DBG_FUNC_NONE,
-                                    (int)uio->uio_offset, bytes_to_move, uio_resid, xfer_resid, 0);
-
-                       retval = uiomove((caddr_t)(io_address + io_offset), bytes_to_move, uio);
+                       bytes_to_move = min(io_resid, xfer_resid);
+                       io_requested = bytes_to_move;
 
+                       retval = cluster_copy_upl_data(uio, upl, io_offset, (int *)&io_requested);
 
                        if (retval) {
-                               if ((kret = ubc_upl_unmap(upl)) != KERN_SUCCESS)
-                                       panic("cluster_write: kernel_upl_unmap failed\n");
 
                                ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
 
                                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
                                             (int)upl, 0, 0, retval, 0);
                        } else {
-                               uio_resid  -= bytes_to_move;
+                               io_resid   -= bytes_to_move;
                                xfer_resid -= bytes_to_move;
                                io_offset  += bytes_to_move;
                        }
@@ -1720,11 +2622,7 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                                bytes_to_zero = xfer_resid;
 
                        if ( !(flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {
-                               bzero((caddr_t)(io_address + io_offset), bytes_to_zero);
-
-                               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
-                                            (int)upl_f_offset + io_offset,
-                                            bytes_to_zero, (int)io_offset, xfer_resid, 0);
+                               cluster_zero(upl, io_offset, bytes_to_zero, NULL); 
                        } else {
                                int zero_pg_index;
                        
@@ -1732,19 +2630,10 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                                zero_pg_index = (int)((zero_off1 - upl_f_offset) / PAGE_SIZE_64);
 
                                if ( !upl_valid_page(pl, zero_pg_index)) {
-                                       bzero((caddr_t)(io_address + io_offset), bytes_to_zero);
-
-                                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
-                                                    (int)upl_f_offset + io_offset,
-                                                    bytes_to_zero, (int)io_offset, xfer_resid, 0);
-
+                                       cluster_zero(upl, io_offset, bytes_to_zero, NULL); 
                                } else if ((flags & (IO_NOZERODIRTY | IO_NOZEROVALID)) == IO_NOZERODIRTY &&
                                           !upl_dirty_page(pl, zero_pg_index)) {
-                                       bzero((caddr_t)(io_address + io_offset), bytes_to_zero);
-
-                                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
-                                                    (int)upl_f_offset + io_offset,
-                                                    bytes_to_zero, (int)io_offset, xfer_resid, 0);
+                                       cluster_zero(upl, io_offset, bytes_to_zero, NULL); 
                                }
                        }
                        xfer_resid -= bytes_to_zero;
@@ -1755,26 +2644,19 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
 
                if (retval == 0) {
                        int cl_index;
-                       int can_delay;
+                       int ret_cluster_try_push;
 
                        io_size += start_offset;
 
-                       if ((upl_f_offset + io_size) >= newEOF && io_size < upl_size) {
+                       if ((upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
                                /*
                                 * if we're extending the file with this write
                                 * we'll zero fill the rest of the page so that
                                 * if the file gets extended again in such a way as to leave a
                                 * hole starting at this EOF, we'll have zero's in the correct spot
                                 */
-                               bzero((caddr_t)(io_address + io_size), upl_size - io_size);
-
-                               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
-                                            (int)upl_f_offset + io_size,
-                                            upl_size - io_size, 0, 0, 0);
+                               cluster_zero(upl, io_size, upl_size - io_size, NULL); 
                        }
-                       if ((kret = ubc_upl_unmap(upl)) != KERN_SUCCESS)
-                               panic("cluster_write: kernel_upl_unmap failed\n");
-
                        if (flags & IO_SYNC)
                                /*
                                 * if the IO_SYNC flag is set then we need to 
@@ -1782,78 +2664,163 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                                 * the I/O
                                 */
                                goto issue_io;
+check_cluster:
+                       /*
+                        * take the lock to protect our accesses
+                        * of the writebehind and sparse cluster state
+                        */
+                       wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);
+
+                       /*
+                        * calculate the last logical block number 
+                        * that this delayed I/O encompassed
+                        */
+                       cl.e_addr = (daddr64_t)((upl_f_offset + (off_t)upl_size) / PAGE_SIZE_64);
+
+                       if (wbp->cl_scmap) {
+
+                               if ( !(flags & IO_NOCACHE)) {
+                                       /*
+                                        * we've fallen into the sparse
+                                        * cluster method of delaying dirty pages
+                                        * first, we need to release the upl if we hold one
+                                        * since pages in it may be present in the sparse cluster map
+                                        * and may span 2 separate buckets there... if they do and 
+                                        * we happen to have to flush a bucket to make room and it intersects
+                                        * this upl, a deadlock may result on page BUSY
+                                        */
+                                       if (upl_size)
+                                               ubc_upl_commit_range(upl, 0, upl_size,
+                                                                    UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
+
+                                       sparse_cluster_add(wbp, vp, &cl, newEOF, callback, callback_arg);
+
+                                       lck_mtx_unlock(&wbp->cl_lockw);
+
+                                       continue;
+                               }
+                               /*
+                                * must have done cached writes that fell into
+                                * the sparse cluster mechanism... we've switched
+                                * to uncached writes on the file, so go ahead
+                                * and push whatever's in the sparse map
+                                * and switch back to normal clustering
+                                *
+                                * see the comment above concerning a possible deadlock...
+                                */
+                               if (upl_size) {
+                                       ubc_upl_commit_range(upl, 0, upl_size,
+                                                            UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
+                                       /*
+                                        * setting upl_size to 0 keeps us from committing a
+                                        * second time in the start_new_cluster path
+                                        */
+                                       upl_size = 0;
+                               }
+                               sparse_cluster_push(wbp, vp, newEOF, PUSH_ALL, callback, callback_arg);
+
+                               wbp->cl_number = 0;
+                               /*
+                                * no clusters of either type present at this point
+                                * so just go directly to start_new_cluster since
+                        * we know we need to delay this I/O because we've
+                                * already released the pages back into the cache
+                                * to avoid the deadlock with sparse_cluster_push
+                                */
+                               goto start_new_cluster;
+                       }                   
+                       upl_offset = 0;
 
-                       if (vp->v_clen == 0)
+                       if (wbp->cl_number == 0)
                                /*
                                 * no clusters currently present
                                 */
                                goto start_new_cluster;
 
-                       /*
-                        * keep track of the overall dirty page
-                        * range we've developed
-                        * in case we have to fall back to the
-                        * VHASDIRTY method of flushing
-                        */
-                       if (vp->v_flag & VHASDIRTY)
-                               goto delay_io;
-
-                       for (cl_index = 0; cl_index < vp->v_clen; cl_index++) {
+                       for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
                                /*
-                                * we have an existing cluster... see if this write will extend it nicely
+                                * check each cluster that we currently hold
+                                * try to merge some or all of this write into
+                                * one or more of the existing clusters... if
+                                * any portion of the write remains, start a
+                                * new cluster
                                 */
-                               if (start_blkno >= vp->v_clusters[cl_index].start_pg) {
+                               if (cl.b_addr >= wbp->cl_clusters[cl_index].b_addr) {
                                        /*
                                         * the current write starts at or after the current cluster
                                         */
-                                       if (last_blkno <= (vp->v_clusters[cl_index].start_pg + MAX_UPL_TRANSFER)) {
+                                       if (cl.e_addr <= (wbp->cl_clusters[cl_index].b_addr + MAX_CLUSTER_SIZE)) {
                                                /*
                                                 * we have a write that fits entirely
                                                 * within the existing cluster limits
                                                 */
-                                               if (last_blkno > vp->v_clusters[cl_index].last_pg)
+                                               if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr)
                                                        /*
                                                         * update our idea of where the cluster ends
                                                         */
-                                                       vp->v_clusters[cl_index].last_pg = last_blkno;
+                                                       wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
                                                break;
                                        }
-                                       if (start_blkno < (vp->v_clusters[cl_index].start_pg + MAX_UPL_TRANSFER)) {
+                                       if (cl.b_addr < (wbp->cl_clusters[cl_index].b_addr + MAX_CLUSTER_SIZE)) {
                                                /*
                                                 * we have a write that starts in the middle of the current cluster
-                                                * but extends beyond the cluster's limit
-                                                * we'll clip the current cluster if we actually
-                                                * overlap with the new write
-                                                * and start a new cluster with the current write
+                                                * but extends beyond the cluster's limit... we know this because
+                                                * of the previous checks
+                                                * we'll extend the current cluster to the max
+                                                * and update the b_addr for the current write to reflect that
+                                                * the head of it was absorbed into this cluster...
+                                                * note that we'll always have a leftover tail in this case since
+                                                * full absorption would have occurred in the clause above
                                                 */
-                                                if (vp->v_clusters[cl_index].last_pg > start_blkno)
-                                                       vp->v_clusters[cl_index].last_pg = start_blkno;
+                                               wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + MAX_CLUSTER_SIZE;
+
+                                               if (upl_size) {
+                                                       daddr64_t start_pg_in_upl;
+
+                                                       start_pg_in_upl = (daddr64_t)(upl_f_offset / PAGE_SIZE_64);
+                                                       
+                                                       if (start_pg_in_upl < wbp->cl_clusters[cl_index].e_addr) {
+                                                               intersection = (int)((wbp->cl_clusters[cl_index].e_addr - start_pg_in_upl) * PAGE_SIZE);
+
+                                                               ubc_upl_commit_range(upl, upl_offset, intersection,
+                                                                                    UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
+                                                               upl_f_offset += intersection;
+                                                               upl_offset   += intersection;
+                                                               upl_size     -= intersection;
+                                                       }
+                                               }
+                                               cl.b_addr = wbp->cl_clusters[cl_index].e_addr;
                                        }
                                        /*
-                                        * we also get here for the case where the current write starts
-                                        * beyond the limit of the existing cluster
+                                        * we come here for the case where the current write starts
+                                        * beyond the limit of the existing cluster or we have a leftover
+                                        * tail after a partial absorption
                                         *
                                         * in either case, we'll check the remaining clusters before 
                                         * starting a new one
                                         */
                                } else {
                                        /*
-                                        * the current write starts in front of the current cluster
+                                        * the current write starts in front of the cluster we're currently considering
                                         */
-                                       if ((vp->v_clusters[cl_index].last_pg - start_blkno) <=  MAX_UPL_TRANSFER) {
+                                       if ((wbp->cl_clusters[cl_index].e_addr - cl.b_addr) <= MAX_CLUSTER_SIZE) {
                                                /*
-                                                * we can just merge the old cluster
-                                                * with the new request and leave it
-                                                * in the cache
+                                                * we can just merge the new request into
+                                                * this cluster and leave it in the cache
+                                                * since the resulting cluster is still 
+                                                * less than the maximum allowable size
                                                 */
-                                               vp->v_clusters[cl_index].start_pg = start_blkno;
+                                               wbp->cl_clusters[cl_index].b_addr = cl.b_addr;
 
-                                               if (last_blkno > vp->v_clusters[cl_index].last_pg) {
+                                               if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr) {
                                                        /*
                                                         * the current write completely
-                                                        * envelops the existing cluster
+                                                        * envelops the existing cluster and since
+                                                        * each write is limited to at most MAX_CLUSTER_SIZE pages
+                                                        * we can just use the start and last blocknos of the write
+                                                        * to generate the cluster limits
                                                         */
-                                                       vp->v_clusters[cl_index].last_pg = last_blkno;
+                                                       wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
                                                }
                                                break;
                                        }
@@ -1862,32 +2829,53 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                                         * if we were to combine this write with the current cluster
                                         * we would exceed the cluster size limit.... so,
                                         * let's see if there's any overlap of the new I/O with
-                                        * the existing cluster...
+                                        * the cluster we're currently considering... in fact, we'll
+                                        * stretch the cluster out to its full limit and see if we
+                                        * get an intersection with the current write
                                         * 
                                         */
-                                       if (last_blkno > vp->v_clusters[cl_index].start_pg)
+                                       if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr - MAX_CLUSTER_SIZE) {
                                                /*
-                                                * the current write extends into the existing cluster
-                                                * clip the current cluster by moving the start position
-                                                * to where the current write ends
+                                                * the current write extends into the proposed cluster
+                                                * clip the length of the current write after first combining its
+                                                * tail with the newly shaped cluster
                                                 */
-                                               vp->v_clusters[cl_index].start_pg = last_blkno;
+                                               wbp->cl_clusters[cl_index].b_addr = wbp->cl_clusters[cl_index].e_addr - MAX_CLUSTER_SIZE;
+
+                                               if (upl_size) {
+                                                       intersection = (int)((cl.e_addr - wbp->cl_clusters[cl_index].b_addr) * PAGE_SIZE);
+
+                                                       if ((u_int)intersection > upl_size)
+                                                               /*
+                                                                * because the current write may consist of a number of pages found in the cache
+                                                                * which are not part of the UPL, we may have an intersection that exceeds
+                                                                * the size of the UPL that is also part of this write
+                                                                */
+                                                               intersection = upl_size;
+
+                                                       ubc_upl_commit_range(upl, upl_offset + (upl_size - intersection), intersection,
+                                                                            UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
+                                                       upl_size -= intersection;
+                                               }
+                                               cl.e_addr = wbp->cl_clusters[cl_index].b_addr;
+                                       }
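+                                       /*
+                                        * in effect (illustrative numbers, assuming MAX_CLUSTER_SIZE of 64
+                                        * pages): a cluster ending at page 300 is stretched to [236, 300);
+                                        * a write spanning [230, 240) has its tail [236, 240) absorbed and
+                                        * committed dirty out of the upl above, and the write is clipped
+                                        * to [230, 236) before the remaining clusters are considered
+                                        */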
                                        /*
                                         * if we get here, there was no way to merge
-                                        * the new I/O with this cluster and
-                                        * keep it under our maximum cluster length
+                                        * any portion of this write with this cluster 
+                                        * or we could only merge part of it which 
+                                        * will leave a tail...
                                         * we'll check the remaining clusters before starting a new one
                                         */
                                }
                        }
-                       if (cl_index < vp->v_clen)
+                       if (cl_index < wbp->cl_number)
                                /*
-                                * we found an existing cluster that we
-                                * could merger this I/O into
+                                * we found one or more existing clusters that we
+                                * could entirely merge this I/O into
                                 */
                                goto delay_io;
 
-                       if (vp->v_clen < MAX_CLUSTERS && !(vp->v_flag & VNOCACHE_DATA))
+                       if (wbp->cl_number < MAX_CLUSTERS)
                                /*
                                 * we didn't find an existing cluster to
                                 * merge into, but there's room to start
@@ -1898,234 +2886,198 @@ cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
                        /*
                        * no existing cluster to merge with and no
                         * room to start a new one... we'll try 
-                        * pushing the existing ones... if none of
-                        * them are able to be pushed, we'll have
-                        * to fall back on the VHASDIRTY mechanism
-                        * cluster_try_push will set v_clen to the
-                        * number of remaining clusters if it is
-                        * unable to push all of them
-                        */
-                       if (vp->v_flag & VNOCACHE_DATA)
-                               can_delay = 0;
-                       else
-                               can_delay = 1;
+                        * pushing one of the existing ones... if none of
+                        * them are able to be pushed, we'll switch
+                        * to the sparse cluster mechanism
+                        * cluster_try_push updates cl_number to the
+                        * number of remaining clusters... and
+                        * returns the number of currently unused clusters
+                        */
+                       ret_cluster_try_push = 0;
 
-                       if (cluster_try_push(vp, newEOF, 0, 0) == 0) {
-                               vp->v_flag |= VHASDIRTY;
-                               goto delay_io;
+                       /*
+                        * if writes are not deferred, call cluster push immediately
+                        */
+                       if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE)) {
+                               
+                               ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, callback, callback_arg);
                        }
-start_new_cluster:
-                       if (vp->v_clen == 0) {
-                               vp->v_ciosiz = devblocksize;
-                               vp->v_cstart = start_blkno;
-                               vp->v_lastw  = last_blkno;
+
+                       /*
+                        * execute following regardless of writes being deferred or not
+                        */
+                       if (ret_cluster_try_push == 0) {
+                               /*
+                                * no more room in the normal cluster mechanism
+                                * so let's switch to the more expansive but expensive
+                                * sparse mechanism....
+                                * first, we need to release the upl if we hold one
+                                * since pages in it may be present in the sparse cluster map (after the cluster_switch)
+                                * and may span 2 separate buckets there... if they do and 
+                                * we happen to have to flush a bucket to make room and it intersects
+                                * this upl, a deadlock may result on page BUSY
+                                */
+                               if (upl_size)
+                                       ubc_upl_commit_range(upl, upl_offset, upl_size,
+                                                            UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
+
+                               sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg);
+                               sparse_cluster_add(wbp, vp, &cl, newEOF, callback, callback_arg);
+
+                               lck_mtx_unlock(&wbp->cl_lockw);
+
+                               continue;
                        }
-                       vp->v_clusters[vp->v_clen].start_pg = start_blkno;
-                       vp->v_clusters[vp->v_clen].last_pg  = last_blkno;
-                       vp->v_clen++;
-delay_io:
                        /*
-                        * make sure we keep v_cstart and v_lastw up to 
-                        * date in case we have to fall back on the
-                        * V_HASDIRTY mechanism (or we've already entered it)
+                        * we pushed one cluster successfully, so we must be sequentially writing this file
+                        * otherwise, we would have failed and fallen into the sparse cluster support
+                        * so let's take the opportunity to push out additional clusters...
+                        * this will give us better I/O locality if we're in a copy loop
+                        * (i.e. we won't jump back and forth between the read and write points)
                         */
-                       if (start_blkno < vp->v_cstart)
-                               vp->v_cstart = start_blkno;
-                       if (last_blkno > vp->v_lastw)
-                               vp->v_lastw = last_blkno;
+                       if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE)) {
+                               while (wbp->cl_number)
+                                       cluster_try_push(wbp, vp, newEOF, 0, callback, callback_arg);
+                       }
+
+start_new_cluster:
+                       wbp->cl_clusters[wbp->cl_number].b_addr = cl.b_addr;
+                       wbp->cl_clusters[wbp->cl_number].e_addr = cl.e_addr;
+
+                       wbp->cl_clusters[wbp->cl_number].io_flags = 0;
+
+                       if (flags & IO_NOCACHE)
+                               wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IONOCACHE;
+
+                       if (bflag & CL_PASSIVE)
+                               wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IOPASSIVE;
+
+                       wbp->cl_number++;
+delay_io:
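+                       /*
+                        * the user's data has already been copied into the cache pages by
+                        * this point, so all that's left is to mark any pages still held in
+                        * the upl dirty and release them... the actual disk I/O is deferred
+                        * until the cluster is eventually pushed
+                        */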
+                       if (upl_size)
+                               ubc_upl_commit_range(upl, upl_offset, upl_size,
+                                                    UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
+
+                       lck_mtx_unlock(&wbp->cl_lockw);
 
-                       ubc_upl_commit_range(upl, 0, upl_size, UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
                        continue;
 issue_io:
                        /*
+                        * we don't hold the vnode lock at this point
+                        *
+                        * because we had to ask for a UPL that provides currently non-present pages, the
+                        * UPL has been automatically set to clear the dirty flags (both software and hardware)
+                        * upon committing it... this is not the behavior we want since it's possible for
+                        * pages currently present as part of a mapped file to be dirtied while the I/O is in flight.
                         * in order to maintain some semblance of coherency with mapped writes
-                        * we need to write the cluster back out as a multiple of the PAGESIZE
-                        * unless the cluster encompasses the last page of the file... in this
-                        * case we'll round out to the nearest device block boundary
+                        * we need to drop the current upl and pick it back up with COPYOUT_FROM set
+                        * so that we correctly deal with a change in state of the hardware modify bit...
+                        * we do this via cluster_push_now... by passing along the IO_SYNC flag, we force
+                        * cluster_push_now to wait until all the I/Os have completed... cluster_push_now is also
+                        * responsible for generating the correct sized I/O(s)
                         */
-                       io_size = upl_size;
-
-                       if ((upl_f_offset + io_size) > newEOF) {
-                               io_size = newEOF - upl_f_offset;
-                               io_size = (io_size + (devblocksize - 1)) & ~(devblocksize - 1);
-                       }
-
-                       if (flags & IO_SYNC)
-                               io_flags = CL_COMMIT | CL_AGE;
-                       else
-                               io_flags = CL_COMMIT | CL_AGE | CL_ASYNC;
+                       ubc_upl_commit_range(upl, 0, upl_size,
+                                            UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
 
-                       if (vp->v_flag & VNOCACHE_DATA)
-                               io_flags |= CL_DUMP;
+                       cl.e_addr = (upl_f_offset + (off_t)upl_size) / PAGE_SIZE_64;
 
-                       while (vp->v_numoutput >= ASYNC_THROTTLE) {
-                               vp->v_flag |= VTHROTTLED;
-                               tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "cluster_write", 0);
-                       }       
-                       retval = cluster_io(vp, upl, 0, upl_f_offset, io_size, devblocksize,
-                                           io_flags, (struct buf *)0);
+                       retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg);
                }
        }
-       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
-                    retval, 0, 0, 0, 0);
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END, retval, 0, io_resid, 0, 0);
 
        return (retval);
 }
 
+
+
 int
-cluster_read(vp, uio, filesize, devblocksize, flags)
-       struct vnode *vp;
-       struct uio   *uio;
-       off_t         filesize;
-       int           devblocksize;
-       int           flags;
+cluster_read(vnode_t vp, struct uio *uio, off_t filesize, int xflags)
 {
-       int           prev_resid;
-       int           clip_size;
-       off_t         max_io_size;
-       struct iovec  *iov;
-       vm_offset_t   upl_offset;
-       int           upl_size;
-       int           pages_in_pl;
-       upl_page_info_t *pl;
-       int           upl_flags;
-       upl_t         upl;
-       int           retval = 0;
+        return cluster_read_ext(vp, uio, filesize, xflags, NULL, NULL);
+}
 
-       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START,
-                    (int)uio->uio_offset, uio->uio_resid, (int)filesize, devblocksize, 0);
 
-       /*
-        * We set a threshhold of 4 pages to decide if the nocopy
-        * read loop is worth the trouble...
+int
+cluster_read_ext(vnode_t vp, struct uio *uio, off_t filesize, int xflags, int (*callback)(buf_t, void *), void *callback_arg)
+{
+       int             retval = 0;
+       int             flags;
+       user_ssize_t    cur_resid;
+        u_int32_t      io_size;
+       u_int32_t       read_length = 0;
+       int             read_type = IO_COPY;
+
+       flags = xflags;
+
+       if (vp->v_flag & VNOCACHE_DATA)
+               flags |= IO_NOCACHE;
+       if ((vp->v_flag & VRAOFF) || speculative_reads_disabled)
+               flags |= IO_RAOFF;
+
+        /*
+        * do a read through the cache if one of the following is true....
+        *   NOCACHE is not true
+        *   the uio request doesn't target USERSPACE
+        * otherwise, find out if we want the direct or contig variant for
+        * the first vector in the uio request
         */
+       if ( (flags & IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg) )
+               retval = cluster_io_type(uio, &read_type, &read_length, 0);
 
-       if (!((vp->v_flag & VNOCACHE_DATA) && (uio->uio_segflg == UIO_USERSPACE)))
-         {
-           retval = cluster_read_x(vp, uio, filesize, devblocksize, flags);
-           KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
-                        (int)uio->uio_offset, uio->uio_resid, vp->v_lastr, retval, 0);
-           return(retval);
-         }
-
-       while (uio->uio_resid && uio->uio_offset < filesize && retval == 0)
-         {
-           /* we know we have a resid, so this is safe */
-           iov = uio->uio_iov;
-           while (iov->iov_len == 0) {
-             uio->uio_iov++;
-             uio->uio_iovcnt--;
-             iov = uio->uio_iov;
-           }
-
-           /*
-            * We check every vector target and if it is physically 
-            * contiguous space, we skip the sanity checks.
-            */
-
-            upl_offset = (vm_offset_t)iov->iov_base & ~PAGE_MASK;
-            upl_size = (upl_offset + PAGE_SIZE +(PAGE_SIZE -1)) & ~PAGE_MASK;
-            pages_in_pl = 0;
-            upl_flags = UPL_QUERY_OBJECT_TYPE;
-            if((vm_map_get_upl(current_map(),
-                              (vm_offset_t)iov->iov_base & ~PAGE_MASK,
-                               &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, 0)) != KERN_SUCCESS)
-              {
-               /*
-                * the user app must have passed in an invalid address
-                */
-               return (EFAULT);
-              }
+       while ((cur_resid = uio_resid(uio)) && uio->uio_offset < filesize && retval == 0) {
 
-           if (upl_flags & UPL_PHYS_CONTIG)
-             {
-               retval = cluster_phys_read(vp, uio, filesize);
-             }
-           else if (uio->uio_resid < 4 * PAGE_SIZE)
-             {
-               /*
-                * We set a threshhold of 4 pages to decide if the nocopy
-                * read loop is worth the trouble...
-                */
-               retval = cluster_read_x(vp, uio, filesize, devblocksize, flags);
-               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
-                            (int)uio->uio_offset, uio->uio_resid, vp->v_lastr, retval, 0);
-               return(retval);
-             }
-           else if (uio->uio_offset & PAGE_MASK_64)
-             {
-               /* Bring the file offset read up to a pagesize boundary */
-               clip_size = (PAGE_SIZE - (int)(uio->uio_offset & PAGE_MASK_64));
-               if (uio->uio_resid < clip_size)
-                 clip_size = uio->uio_resid;
-               /* 
-                * Fake the resid going into the cluster_read_x call
-                * and restore it on the way out.
-                */
-               prev_resid = uio->uio_resid;
-               uio->uio_resid = clip_size;
-               retval = cluster_read_x(vp, uio, filesize, devblocksize, flags);
-               uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
-             }
-           else if ((int)iov->iov_base & PAGE_MASK_64)
-             {
-               clip_size = iov->iov_len;
-               prev_resid = uio->uio_resid;
-               uio->uio_resid = clip_size;
-               retval = cluster_read_x(vp, uio, filesize, devblocksize, flags);
-               uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
-             }
-           else
-             {
-               /* 
-                * If we come in here, we know the offset into
-                * the file is on a pagesize boundary
-                */
-
-               max_io_size = filesize - uio->uio_offset;
-               clip_size = uio->uio_resid;
-               if (iov->iov_len < clip_size)
-                 clip_size = iov->iov_len;
-               if (max_io_size < clip_size)
-                 clip_size = (int)max_io_size;
-
-               if (clip_size < PAGE_SIZE)
-                 {
-                   /*
-                    * Take care of the tail end of the read in this vector.
-                    */
-                   prev_resid = uio->uio_resid;
-                   uio->uio_resid = clip_size;
-                   retval = cluster_read_x(vp, uio, filesize, devblocksize, flags);
-                   uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
-                 }
-               else
-                 {
-                   /* round clip_size down to a multiple of pagesize */
-                   clip_size = clip_size & ~(PAGE_MASK);
-                   prev_resid = uio->uio_resid;
-                   uio->uio_resid = clip_size;
-                   retval = cluster_nocopy_read(vp, uio, filesize, devblocksize, flags);
-                   if ((retval==0) && uio->uio_resid)
-                     retval = cluster_read_x(vp, uio, filesize, devblocksize, flags);
-                   uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
-                 }
-             } /* end else */
-         } /* end while */
-
-       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
-                    (int)uio->uio_offset, uio->uio_resid, vp->v_lastr, retval, 0);
+               switch (read_type) {
+               
+               case IO_COPY:
+                       /*
+                        * make sure the uio_resid isn't too big...
+                        * internally, we want to handle all of the I/O in
+                        * chunk sizes that fit in a 32 bit int
+                        */
+                       if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE))
+                               io_size = MAX_IO_REQUEST_SIZE;
+                       else
+                               io_size = (u_int32_t)cur_resid;
 
-       return(retval);
+                       retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg);
+                       break;
+
+               case IO_DIRECT:
+                       retval = cluster_read_direct(vp, uio, filesize, &read_type, &read_length, flags, callback, callback_arg);
+                       break;
+
+               case IO_CONTIG:
+                       retval = cluster_read_contig(vp, uio, filesize, &read_type, &read_length, callback, callback_arg, flags);
+                       break;
+                 
+               case IO_UNKNOWN:
+                       retval = cluster_io_type(uio, &read_type, &read_length, 0);
+                       break;
+               }
+       }
+       return (retval);
+}
+
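+/*
+ * illustrative usage sketch (not part of this file): a filesystem's read
+ * vnop typically hands buffered reads straight to cluster_read(), roughly
+ * as follows... myfs_filesize() is a hypothetical helper returning the
+ * vnode's current EOF:
+ *
+ *     static int
+ *     myfs_vnop_read(struct vnop_read_args *ap)
+ *     {
+ *             off_t filesize = myfs_filesize(ap->a_vp);
+ *
+ *             return (cluster_read(ap->a_vp, ap->a_uio, filesize, ap->a_ioflag));
+ *     }
+ */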
+
+
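+/*
+ * release a range of pages from the given upl without modifying them...
+ * when the read went through the cache the abort also passes
+ * UPL_ABORT_REFERENCE (presumably so the pages are treated as recently
+ * referenced and kept around); IO_NOCACHE reads skip that
+ */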
+static void
+cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int flags)
+{
+       int range;
+       int abort_flags = UPL_ABORT_FREE_ON_EMPTY;
+
+       if ((range = last_pg - start_pg)) {
+               if ( !(flags & IO_NOCACHE))
+                       abort_flags |= UPL_ABORT_REFERENCE;
+
+               ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, range * PAGE_SIZE, abort_flags);
+       }
 }
 
+
 static int
-cluster_read_x(vp, uio, filesize, devblocksize, flags)
-       struct vnode *vp;
-       struct uio   *uio;
-       off_t         filesize;
-       int           devblocksize;
-       int           flags;
+cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
 {
        upl_page_info_t *pl;
        upl_t            upl;
@@ -2135,21 +3087,84 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
        int              start_offset;
        int              start_pg;
        int              last_pg;
-       int              uio_last;
+       int              uio_last = 0;
        int              pages_in_upl;
        off_t            max_size;
-       int              io_size;
-       vm_offset_t      io_address;
+       off_t            last_ioread_offset;
+       off_t            last_request_offset;
        kern_return_t    kret;
-       int              segflg;
        int              error  = 0;
        int              retval = 0;
-       int              b_lblkno;
-       int              e_lblkno;
+       u_int32_t        size_of_prefetch;
+       u_int32_t        xsize;
+       u_int32_t        io_size;
+       u_int32_t        max_rd_size = MAX_PREFETCH;
+       u_int            rd_ahead_enabled = 1;
+       u_int            prefetch_enabled = 1;
+       struct cl_readahead *   rap;
+       struct clios            iostate;
+       struct cl_extent        extent;
+       int              bflag;
+       int              take_reference = 1;
+       struct uthread  *ut;
+       int              policy = IOPOL_DEFAULT;
+
+       policy = current_proc()->p_iopol_disk;
+
+       ut = get_bsdthread_info(current_thread());
+
+       if (ut->uu_iopol_disk != IOPOL_DEFAULT)
+               policy = ut->uu_iopol_disk;
+
+       if (policy == IOPOL_THROTTLE)
+               take_reference = 0;
+
+       if (flags & IO_PASSIVE)
+           bflag = CL_PASSIVE;
+       else
+           bflag = 0;
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START,
+                    (int)uio->uio_offset, io_req_size, (int)filesize, flags, 0);
+                        
+       last_request_offset = uio->uio_offset + io_req_size;
+
+       if ((flags & (IO_RAOFF|IO_NOCACHE)) || ((last_request_offset & ~PAGE_MASK_64) == (uio->uio_offset & ~PAGE_MASK_64))) {
+               rd_ahead_enabled = 0;
+               rap = NULL;
+       } else {
+               if (cluster_hard_throttle_on(vp)) {
+                       rd_ahead_enabled = 0;
+                       prefetch_enabled = 0;
 
-       b_lblkno = (int)(uio->uio_offset / PAGE_SIZE_64);
+                       max_rd_size = HARD_THROTTLE_MAXSIZE;
+               }
+               if ((rap = cluster_get_rap(vp)) == NULL)
+                       rd_ahead_enabled = 0;
+       }
+       if (last_request_offset > filesize)
+               last_request_offset = filesize;
+       extent.b_addr = uio->uio_offset / PAGE_SIZE_64;
+        extent.e_addr = (last_request_offset - 1) / PAGE_SIZE_64;
+
+       if (rap != NULL && rap->cl_ralen && (rap->cl_lastr == extent.b_addr || (rap->cl_lastr + 1) == extent.b_addr)) {
+               /*
+                * determine if we already have a read-ahead in the pipe courtesy of the
+                        * last read system call that was issued...
+                        * if so, pick up its extent to determine where we should start
+                        * with respect to any read-ahead that might be necessary to
+                        * garner all the data needed to complete this read system call
+                */
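+                       /*
+                        * for example (illustrative numbers): if the previous read left
+                        * rap->cl_maxra at page 41, read-ahead I/O has already been issued
+                        * through the end of page 41, so last_ioread_offset starts at byte
+                        * offset (41 + 1) * PAGE_SIZE and any further prefetch picks up
+                        * from there
+                        */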
+               last_ioread_offset = (rap->cl_maxra * PAGE_SIZE_64) + PAGE_SIZE_64;
+
+               if (last_ioread_offset < uio->uio_offset)
+                       last_ioread_offset = (off_t)0;
+               else if (last_ioread_offset > last_request_offset)
+                       last_ioread_offset = last_request_offset;
+       } else
+               last_ioread_offset = (off_t)0;
 
-       while (uio->uio_resid && uio->uio_offset < filesize && retval == 0) {
+       while (io_req_size && uio->uio_offset < filesize && retval == 0) {
                /*
                 * compute the size of the upl needed to encompass
                 * the requested read... limit each call to cluster_io
@@ -2162,88 +3177,118 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                upl_f_offset = uio->uio_offset - (off_t)start_offset;
                max_size     = filesize - uio->uio_offset;
 
-               if ((off_t)((unsigned int)uio->uio_resid) < max_size)
-                       io_size = uio->uio_resid;
+               if ((off_t)(io_req_size) < max_size)
+                       io_size = io_req_size;
                else
                        io_size = max_size;
 
-               if (uio->uio_segflg == UIO_USERSPACE && !(vp->v_flag & VNOCACHE_DATA)) {
-                       segflg = uio->uio_segflg;
+               if (!(flags & IO_NOCACHE)) {
 
-                       uio->uio_segflg = UIO_PHYS_USERSPACE;
+                       while (io_size) {
+                               u_int32_t io_resid;
+                               u_int32_t io_requested;
 
-                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
-                                    (int)uio->uio_offset, io_size, uio->uio_resid, 0, 0);
+                               /*
+                                * if we keep finding the pages we need already in the cache, then
+                                * don't bother to call cluster_read_prefetch since it costs CPU cycles
+                                * to determine that we have all the pages we need... once we miss in
+                                * the cache and have issued an I/O, then we'll assume that we're likely
+                                * to continue to miss in the cache and it's to our advantage to try and prefetch
+                                */
+                               if (last_request_offset && last_ioread_offset && (size_of_prefetch = (last_request_offset - last_ioread_offset))) {
+                                       if ((last_ioread_offset - uio->uio_offset) <= max_rd_size && prefetch_enabled) {
+                                               /*
+                                                * we've already issued I/O for this request and
+                                                * there's still work to do and
+                                                * our prefetch stream is running dry, so issue a
+                                                * pre-fetch I/O... the I/O latency will overlap
+                                                * with the copying of the data
+                                                */
+                                               if (size_of_prefetch > max_rd_size)
+                                                       size_of_prefetch = max_rd_size;
 
-                       while (io_size && retval == 0) {
-                               int         xsize;
-                               vm_offset_t paddr;
+                                               size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
 
-                               if (ubc_page_op(vp,
-                                               upl_f_offset,
-                                               UPL_POP_SET | UPL_POP_BUSY,
-                                               &paddr, 0) != KERN_SUCCESS)
-                                       break;
+                                               last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
+                               
+                                               if (last_ioread_offset > last_request_offset)
+                                                       last_ioread_offset = last_request_offset;
+                                       }
+                               }
+                               /*
+                                * limit the size of the copy we're about to do so that 
+                                * we can notice that our I/O pipe is running dry and 
+                                * get the next I/O issued before it does go dry
+                                */
+                               if (last_ioread_offset && io_size > ((MAX_UPL_TRANSFER * PAGE_SIZE) / 4))
+                                       io_resid = ((MAX_UPL_TRANSFER * PAGE_SIZE) / 4);
+                               else
+                                       io_resid = io_size;
 
-                               xsize = PAGE_SIZE - start_offset;
-                       
-                               if (xsize > io_size)
-                                       xsize = io_size;
+                               io_requested = io_resid;
 
-                               retval = uiomove((caddr_t)(paddr + start_offset), xsize, uio);
+                               retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_resid, 0, take_reference);
 
-                               ubc_page_op(vp, upl_f_offset,
-                                           UPL_POP_CLR | UPL_POP_BUSY, 0, 0);
+                               xsize = io_requested - io_resid;
 
-                               io_size     -= xsize;
-                               start_offset = (int)
-                                       (uio->uio_offset & PAGE_MASK_64);
-                               upl_f_offset = uio->uio_offset - start_offset;
-                       }
-                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
-                                    (int)uio->uio_offset, io_size, uio->uio_resid, 0, 0);
+                               io_size -= xsize;
+                               io_req_size -= xsize;
 
-                       uio->uio_segflg = segflg;
-                       
+                               if (retval || io_resid)
+                                       /*
+                                        * if we run into a real error or
+                                        * a page that is not in the cache
+                                        * we need to leave streaming mode
+                                        */
+                                       break;
+                               
+                               if ((io_size == 0 || last_ioread_offset == last_request_offset) && rd_ahead_enabled) {
+                                       /*
+                                        * we're already finished the I/O for this read request
+                                        * let's see if we should do a read-ahead
+                                        */
+                                       cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
+                               }
+                       }
                        if (retval)
                                break;
-
                        if (io_size == 0) {
-                               /*
-                                * we're already finished with this read request
-                                * let's see if we should do a read-ahead
-                                */
-                               e_lblkno = (int)
-                                       ((uio->uio_offset - 1) / PAGE_SIZE_64);
-
-                               if (!(vp->v_flag & VRAOFF))
-                                       /*
-                                        * let's try to read ahead if we're in 
-                                        * a sequential access pattern
-                                        */
-                                       cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize);
-                               vp->v_lastr = e_lblkno;
-
+                               if (rap != NULL) {
+                                       if (extent.e_addr < rap->cl_lastr)
+                                               rap->cl_maxra = 0;
+                                       rap->cl_lastr = extent.e_addr;
+                               }
                                break;
                        }
-                       max_size = filesize - uio->uio_offset;
+                       start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
+                       upl_f_offset = uio->uio_offset - (off_t)start_offset;
+                       max_size     = filesize - uio->uio_offset;
                }
+               if (io_size > max_rd_size)
+                       io_size = max_rd_size;
+
                upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
-               if (upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
-                       upl_size = MAX_UPL_TRANSFER * PAGE_SIZE;
+
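+               /*
+                * for cached reads the upl is capped at a quarter of the maximum
+                * transfer size, presumably for the same reason the copy size is
+                * clamped above: keep each pass short enough that we notice the
+                * prefetch stream running dry and can reprime it... nocache reads
+                * have no prefetch stream to feed, so they get the full size
+                */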
+               if (flags & IO_NOCACHE) {
+                       if (upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
+                               upl_size = (MAX_UPL_TRANSFER * PAGE_SIZE);
+               } else {
+                       if (upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE) / 4)
+                               upl_size = (MAX_UPL_TRANSFER * PAGE_SIZE) / 4;
+               }
                pages_in_upl = upl_size / PAGE_SIZE;
 
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START,
                             (int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
 
                kret = ubc_create_upl(vp, 
-                                               upl_f_offset,
-                                               upl_size,
-                                               &upl,
-                                               &pl,
-                                               UPL_FLAGS_NONE);
+                                     upl_f_offset,
+                                     upl_size,
+                                     &upl,
+                                     &pl,
+                                     UPL_FILE_IO | UPL_SET_LITE);
                if (kret != KERN_SUCCESS)
-                       panic("cluster_read: failed to get pagelist");
+                       panic("cluster_read_copy: failed to get pagelist");
 
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_END,
                             (int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
@@ -2269,6 +3314,10 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                        if (upl_valid_page(pl, last_pg))
                                break;
                }
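+               /*
+                * set up the async I/O state shared with cluster_io: it tracks how
+                * many bytes have been issued versus completed, records any error
+                * from the individual transfers, and notes whether this thread is
+                * sleeping waiting for the outstanding I/O to drain
+                */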
+               iostate.io_completed = 0;
+               iostate.io_issued = 0;
+               iostate.io_error = 0;
+               iostate.io_wanted = 0;
 
                if (start_pg < last_pg) {               
                        /*
@@ -2284,543 +3333,849 @@ cluster_read_x(vp, uio, filesize, devblocksize, flags)
                                io_size = filesize - (upl_f_offset + upl_offset);
 
                        /*
-                        * issue a synchronous read to cluster_io
+                        * issue an asynchronous read to cluster_io
                         */
 
                        error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset,
-                                          io_size, devblocksize, CL_READ, (struct buf *)0);
+                                          io_size, CL_READ | CL_ASYNC | bflag, (buf_t)NULL, &iostate, callback, callback_arg);
                }
                if (error == 0) {
                        /*
                         * if the read completed successfully, or there was no I/O request
-                        * issued, than map the upl into kernel address space and
-                        * move the data into user land.... we'll first add on any 'valid'
+                        * issued, then copy the data into user land via 'cluster_copy_upl_data'
+                        * we'll first add on any 'valid'
                         * pages that were present in the upl when we acquired it.
                         */
                        u_int  val_size;
-                       u_int  size_of_prefetch;
 
                        for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) {
                                if (!upl_valid_page(pl, uio_last))
                                        break;
                        }
+                       if (uio_last < pages_in_upl) {
+                               /*
+                                * there were some invalid pages beyond the valid pages
+                                * that we didn't issue an I/O for, just release them
+                                * unchanged now, so that any prefetch/readahead can
+                                * include them
+                                */
+                               ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
+                                                   (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
+                       }
+
                        /*
-                        * compute size to transfer this round,  if uio->uio_resid is
-                        * still non-zero after this uiomove, we'll loop around and
+                        * compute size to transfer this round,  if io_req_size is
+                        * still non-zero after this attempt, we'll loop around and
                         * set up for another I/O.
                         */
                        val_size = (uio_last * PAGE_SIZE) - start_offset;
                
-                       if (max_size < val_size)
+                       if (val_size > max_size)
                                val_size = max_size;
 
-                       if (uio->uio_resid < val_size)
-                               val_size = uio->uio_resid;
+                       if (val_size > io_req_size)
+                               val_size = io_req_size;
 
-                       e_lblkno = (int)((uio->uio_offset + ((off_t)val_size - 1)) / PAGE_SIZE_64);
+                       if ((uio->uio_offset + val_size) > last_ioread_offset)
+                               last_ioread_offset = uio->uio_offset + val_size;
 
-                       if (size_of_prefetch = (uio->uio_resid - val_size)) {
-                               /*
-                                * if there's still I/O left to do for this request, then issue a
-                                * pre-fetch I/O... the I/O wait time will overlap
-                                * with the copying of the data
-                                */
-                               cluster_rd_prefetch(vp, uio->uio_offset + val_size, size_of_prefetch, filesize, devblocksize);
-                       } else {
-                               if (!(vp->v_flag & VRAOFF) && !(vp->v_flag & VNOCACHE_DATA))
+                       if ((size_of_prefetch = (last_request_offset - last_ioread_offset)) && prefetch_enabled) {
+
+                               if ((last_ioread_offset - (uio->uio_offset + val_size)) <= upl_size) {
                                        /*
-                                        * let's try to read ahead if we're in 
-                                        * a sequential access pattern
+                                        * if there's still I/O left to do for this request, and...
+                                        * we're not in hard throttle mode, and...
+                                        * we're close to using up the previous prefetch, then issue a
+                                        * new pre-fetch I/O... the I/O latency will overlap
+                                        * with the copying of the data
                                         */
-                                       cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize);
-                               vp->v_lastr = e_lblkno;
-                       }
-                       if (uio->uio_segflg == UIO_USERSPACE) {
-                               int       offset;
-
-                               segflg = uio->uio_segflg;
-
-                               uio->uio_segflg = UIO_PHYS_USERSPACE;
-
-
-                               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
-                                            (int)uio->uio_offset, val_size, uio->uio_resid, 0, 0);
-
-                               offset = start_offset;
+                                       if (size_of_prefetch > max_rd_size)
+                                               size_of_prefetch = max_rd_size;
 
-                               while (val_size && retval == 0) {
-                                       int       csize;
-                                       int       i;
-                                       caddr_t   paddr;
+                                       size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
 
-                                       i = offset / PAGE_SIZE;
-                                       csize = min(PAGE_SIZE - start_offset, val_size);
-
-                                       paddr = (caddr_t)upl_phys_page(pl, i) + start_offset;
-
-                                       retval = uiomove(paddr, csize, uio);
-
-                                       val_size    -= csize;
-                                       offset      += csize;
-                                       start_offset = offset & PAGE_MASK;
+                                       last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
+                               
+                                       if (last_ioread_offset > last_request_offset)
+                                               last_ioread_offset = last_request_offset;
                                }
-                               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
-                                            (int)uio->uio_offset, val_size, uio->uio_resid, 0, 0);
 
-                               uio->uio_segflg = segflg;
+                       } else if ((uio->uio_offset + val_size) == last_request_offset) {
+                               /*
+                                * this transfer will finish this request, so...
+                                * let's try to read ahead if we're in 
+                                * a sequential access pattern and we haven't
+                                * explicitly disabled it
+                                */
+                               if (rd_ahead_enabled)
+                                       cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
+                                       
+                               if (rap != NULL) {
+                                       if (extent.e_addr < rap->cl_lastr)
+                                               rap->cl_maxra = 0;
+                                       rap->cl_lastr = extent.e_addr;
+                               }
                        }
-                       else
-                       {
-                               if ((kret = ubc_upl_map(upl, &io_address)) != KERN_SUCCESS)
-                                       panic("cluster_read: ubc_upl_map() failed\n");
+                       lck_mtx_lock(cl_mtxp);
 
-                               retval = uiomove((caddr_t)(io_address + start_offset), val_size, uio);
+                       while (iostate.io_issued != iostate.io_completed) {
+                               iostate.io_wanted = 1;
+                               msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_copy", NULL);
+                       }       
+                       lck_mtx_unlock(cl_mtxp);
+
+                       if (iostate.io_error)
+                               error = iostate.io_error;
+                       else {
+                               u_int32_t io_requested;
+
+                               io_requested = val_size;
 
-                               if ((kret = ubc_upl_unmap(upl)) != KERN_SUCCESS)
-                                       panic("cluster_read: ubc_upl_unmap() failed\n");
+                               retval = cluster_copy_upl_data(uio, upl, start_offset, (int *)&io_requested);
+                               
+                               io_req_size -= (val_size - io_requested);
                        }
                }
                if (start_pg < last_pg) {
                        /*
                         * compute the range of pages that we actually issued an I/O for
                         * and either commit them as valid if the I/O succeeded
-                        * or abort them if the I/O failed
+                        * or abort them if the I/O failed or we're not supposed to 
+                        * keep them in the cache
                         */
                        io_size = (last_pg - start_pg) * PAGE_SIZE;
 
-                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START,
-                                    (int)upl, start_pg * PAGE_SIZE, io_size, error, 0);
+                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, (int)upl, start_pg * PAGE_SIZE, io_size, error, 0);
 
-                       if (error || (vp->v_flag & VNOCACHE_DATA))
+                       if (error || (flags & IO_NOCACHE))
                                ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
-                                               UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
+                                                   UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
                        else
                                ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, 
-                                               UPL_COMMIT_CLEAR_DIRTY
-                                               | UPL_COMMIT_FREE_ON_EMPTY 
-                                               | UPL_COMMIT_INACTIVATE);
+                                                    UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_INACTIVATE);
 
-                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END,
-                                    (int)upl, start_pg * PAGE_SIZE, io_size, error, 0);
+                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, (int)upl, start_pg * PAGE_SIZE, io_size, error, 0);
                }
                if ((last_pg - start_pg) < pages_in_upl) {
-                       int cur_pg;
-                       int commit_flags;
-
                        /*
                         * the set of pages that we issued an I/O for did not encompass
                         * the entire upl... so just release these without modifying
-                        * there state
+                        * their state
                         */
                        if (error)
                                ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
                        else {
+
                                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START,
                                             (int)upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0);
 
-                               if (start_pg) {
-                                       /*
-                                        * we found some already valid pages at the beginning of
-                                        * the upl commit these back to the inactive list with
-                                        * reference cleared
-                                        */
-                                       for (cur_pg = 0; cur_pg < start_pg; cur_pg++) {
-                                               commit_flags = UPL_COMMIT_FREE_ON_EMPTY 
-                                                                  | UPL_COMMIT_INACTIVATE;
-                                               
-                                               if (upl_dirty_page(pl, cur_pg))
-                                                       commit_flags |= UPL_COMMIT_SET_DIRTY;
-                                               
-                                               if ( !(commit_flags & UPL_COMMIT_SET_DIRTY) && (vp->v_flag & VNOCACHE_DATA))
-                                                       ubc_upl_abort_range(upl, cur_pg * PAGE_SIZE, PAGE_SIZE,
-                                                               UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
-                                               else
-                                                       ubc_upl_commit_range(upl, cur_pg * PAGE_SIZE, 
-                                                               PAGE_SIZE, commit_flags);
-                                       }
-                               }
-                               if (last_pg < uio_last) {
-                                       /*
-                                        * we found some already valid pages immediately after the
-                                        * pages we issued I/O for, commit these back to the
-                                        * inactive list with reference cleared
-                                        */
-                                       for (cur_pg = last_pg; cur_pg < uio_last; cur_pg++) {
-                                               commit_flags =  UPL_COMMIT_FREE_ON_EMPTY 
-                                                                               | UPL_COMMIT_INACTIVATE;
-
-                                               if (upl_dirty_page(pl, cur_pg))
-                                                       commit_flags |= UPL_COMMIT_SET_DIRTY;
-                                               
-                                               if ( !(commit_flags & UPL_COMMIT_SET_DIRTY) && (vp->v_flag & VNOCACHE_DATA))
-                                                       ubc_upl_abort_range(upl, cur_pg * PAGE_SIZE, PAGE_SIZE,
-                                                               UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
-                                               else
-                                                       ubc_upl_commit_range(upl, cur_pg * PAGE_SIZE, 
-                                                               PAGE_SIZE, commit_flags);
-                                       }
-                               }
-                               if (uio_last < pages_in_upl) {
-                                       /*
-                                        * there were some invalid pages beyond the valid pages
-                                        * that we didn't issue an I/O for, just release them
-                                        * unchanged
-                                        */
-                                       ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
-                                                           (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
-                               }
+                               /*
+                                * handle any valid pages at the beginning of
+                                * the upl... release these appropriately
+                                */
+                               cluster_read_upl_release(upl, 0, start_pg, flags);
 
-                               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END,
-                                       (int)upl, -1, -1, 0, 0);
+                               /*
+                                * handle any valid pages immediately after the
+                                * pages we issued I/O for... ... release these appropriately
+                                */
+                               cluster_read_upl_release(upl, last_pg, uio_last, flags);
+
+                               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, (int)upl, -1, -1, 0, 0);
                        }
                }
                if (retval == 0)
                        retval = error;
+
+               if (io_req_size) {
+                       if (cluster_hard_throttle_on(vp)) {
+                               rd_ahead_enabled = 0;
+                               prefetch_enabled = 0;
+
+                               max_rd_size = HARD_THROTTLE_MAXSIZE;
+                       } else {
+                               if (max_rd_size == HARD_THROTTLE_MAXSIZE) {
+                                       /*
+                                        * coming out of throttled state
+                                        */
+                                       if (rap != NULL)
+                                               rd_ahead_enabled = 1;
+                                       prefetch_enabled = 1;
+
+                                       max_rd_size = MAX_PREFETCH;
+                                       last_ioread_offset = 0;
+                               }
+                       }
+               }
+       }
+       if (rap != NULL) {
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
+                            (int)uio->uio_offset, io_req_size, rap->cl_lastr, retval, 0);
+
+               lck_mtx_unlock(&rap->cl_lockr);
+       } else {
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
+                            (int)uio->uio_offset, io_req_size, 0, retval, 0);
        }
 
        return (retval);
 }
 
+
 static int
-cluster_nocopy_read(vp, uio, filesize, devblocksize, flags)
-       struct vnode *vp;
-       struct uio   *uio;
-       off_t         filesize;
-       int           devblocksize;
-       int           flags;
+cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
+                   int flags, int (*callback)(buf_t, void *), void *callback_arg)
 {
        upl_t            upl;
        upl_page_info_t  *pl;
-       off_t            upl_f_offset;
+        off_t           max_io_size;
        vm_offset_t      upl_offset;
-       off_t            start_upl_f_offset;
-       off_t            max_io_size;
-       int              io_size;
-       int              upl_size;
-       int              upl_needed_size;
-       int              pages_in_pl;
-       vm_offset_t      paddr;
+       vm_size_t        upl_size;
+       vm_size_t        upl_needed_size;
+       unsigned int     pages_in_pl;
        int              upl_flags;
+       int              bflag;
        kern_return_t    kret;
-       int              segflg;
-       struct iovec     *iov;
-       int              i;
+       unsigned int     i;
        int              force_data_sync;
-       int              error  = 0;
        int              retval = 0;
+       int              no_zero_fill = 0;
+       int              abort_flag = 0;
+       int              io_flag = 0;
+       int              misaligned = 0;
+       struct clios     iostate;
+       user_addr_t      iov_base;
+       u_int32_t        io_req_size;
+       u_int32_t        offset_in_file;
+       u_int32_t        offset_in_iovbase;
+       u_int32_t        io_size;
+       u_int32_t        io_min;
+        u_int32_t       xsize;
+       u_int32_t        devblocksize;
+       u_int32_t        mem_alignment_mask;
+       u_int32_t        max_rd_size  = MAX_UPL_TRANSFER * PAGE_SIZE;
+       u_int32_t        max_rd_ahead = MAX_PREFETCH;
+
+       if (flags & IO_PASSIVE)
+           bflag = CL_PASSIVE;
+       else
+           bflag = 0;
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START,
-                    (int)uio->uio_offset, uio->uio_resid, (int)filesize, devblocksize, 0);
+                    (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
+
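+       /*
+        * the clios structure tracks the bytes of async I/O issued versus
+        * completed for this stream of direct reads... it's used both to
+        * throttle the number of outstanding reads and to wait for them
+        * all to drain before returning (see wait_for_dreads)
+        */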
+       iostate.io_completed = 0;
+       iostate.io_issued = 0;
+       iostate.io_error = 0;
+       iostate.io_wanted = 0;
+
+       devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
+       mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
+                    (int)devblocksize, (int)mem_alignment_mask, 0, 0, 0);
+
+       if (devblocksize == 1) {
+               /*
+                * the AFP client advertises a devblocksize of 1
+                * however, its BLOCKMAP routine maps to physical
+                * blocks that are PAGE_SIZE in size...
+                * therefore we can't ask for I/Os that aren't page aligned
+                * or aren't multiples of PAGE_SIZE in size
+                * by setting devblocksize to PAGE_SIZE, we re-instate
+                * the old behavior we had before the mem_alignment_mask
+                * changes went in...
+                */
+               devblocksize = PAGE_SIZE;
+       }
+next_dread:
+       io_req_size = *read_length;
+       iov_base = uio_curriovbase(uio);
+
+        max_io_size = filesize - uio->uio_offset;
+
+       if ((off_t)io_req_size > max_io_size)
+               io_req_size = max_io_size;
 
+       offset_in_file = (u_int32_t)uio->uio_offset & (devblocksize - 1);
+       offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;
+
+       if (offset_in_file || offset_in_iovbase) {
+               /*
+                * one of the 2 important offsets is misaligned
+                * so fire an I/O through the cache for this entire vector
+                */
+               misaligned = 1;
+       }
+       if (iov_base & (devblocksize - 1)) {
+               /*
+                * the offset in memory must be on a device block boundary
+                * so that we can guarantee that we can generate an
+                * I/O that ends on a page boundary in cluster_io
+                */
+               misaligned = 1;
+        }
        /*
-        * When we enter this routine, we know
-        *  -- the offset into the file is on a pagesize boundary
-        *  -- the resid is a page multiple
-        *  -- the resid will not exceed iov_len
+        * When we get to this point, we know...
+        *  -- the offset into the file is on a devblocksize boundary
         */
 
-       iov = uio->uio_iov;
-       while (uio->uio_resid && uio->uio_offset < filesize && retval == 0) {
+       while (io_req_size && retval == 0) {
+               u_int32_t io_start;
+
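+               /*
+                * re-sample the throttle state on each pass through the
+                * loop... while throttled, clamp both the size of each
+                * direct read and the amount of async I/O we allow to be
+                * outstanding before waiting
+                */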
+               if (cluster_hard_throttle_on(vp)) {
+                       max_rd_size  = HARD_THROTTLE_MAXSIZE;
+                       max_rd_ahead = HARD_THROTTLE_MAXSIZE - 1;
+               } else {
+                       max_rd_size  = MAX_UPL_TRANSFER * PAGE_SIZE;
+                       max_rd_ahead = MAX_PREFETCH;
+               }
+               io_start = io_size = io_req_size;
+
+               /*
+                * First look for pages already in the cache
+                * and move them to user space.
+                *
+                * cluster_copy_ubc_data returns the resid
+                * in io_size
+                */
+               retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_size, 0, 0);
+                       
+               /*
+                * calculate the number of bytes actually copied
+                * starting size - residual
+                */
+               xsize = io_start - io_size;
+
+               io_req_size -= xsize;
+
+               /*
+                * check to see if we are finished with this request...
+                */
+               if (io_req_size == 0 || misaligned) {
+                       /*
+                        * see if there's another uio vector to
+                        * process that's of type IO_DIRECT
+                        *
+                        * break out of while loop to get there
+                        */
+                       break;
+               }
+               /*
+                * assume the request ends on a device block boundary
+                */
+               io_min = devblocksize;
 
-         max_io_size = filesize - uio->uio_offset;
+               /*
+                * we can handle I/O's in multiples of the device block size
+                * however, if io_size isn't a multiple of devblocksize we
+                * want to clip it back to the nearest page boundary since
+                * we are going to have to go through cluster_read_copy to
+                * deal with the 'overhang'... by clipping it to a PAGE_SIZE
+                * multiple, we avoid asking the drive for the same physical
+                * blocks twice.. once for the partial page at the end of the
+                * request and a 2nd time for the page we read into the cache
+                * (which overlaps the end of the direct read) in order to 
+                * get at the overhang bytes
+                */
+               if (io_size & (devblocksize - 1)) {
+                       /*
+                        * request does NOT end on a device block boundary
+                        * so clip it back to a PAGE_SIZE boundary
+                        */
+                       io_size &= ~PAGE_MASK;
+                       io_min = PAGE_SIZE;
+               }
+               if (retval || io_size < io_min) {
+                       /*
+                        * either an error or we only have the tail left to
+                        * complete via the copy path...
+                        * we may have already spun some portion of this request
+                        * off as async requests... we need to wait for the I/O
+                        * to complete before returning
+                        */
+                       goto wait_for_dreads;
+               }
+               if ((xsize = io_size) > max_rd_size)
+                       xsize = max_rd_size;
 
-         if (max_io_size < (off_t)((unsigned int)uio->uio_resid))
-             io_size = max_io_size;
-         else
-             io_size = uio->uio_resid;
+               io_size = 0;
 
-         /*
-          * We don't come into this routine unless
-          * UIO_USERSPACE is set.
-          */
-         segflg = uio->uio_segflg;
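+               /*
+                * ask the UBC how many contiguous bytes starting at the
+                * current file offset are ABSENT from the cache... only
+                * that range is a candidate for the direct read; anything
+                * already cached will be picked up by cluster_copy_ubc_data
+                * on the next pass through the loop
+                */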
+               ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize, UPL_ROP_ABSENT, (int *)&io_size);
 
-         uio->uio_segflg = UIO_PHYS_USERSPACE;
+               if (io_size == 0) {
+                       /*
+                        * a page must have just come into the cache
+                        * since the first page in this range is no
+                        * longer absent, go back and re-evaluate
+                        */
+                       continue;
+               }
+               iov_base = uio_curriovbase(uio);
 
-         /*
-          * First look for pages already in the cache
-          * and move them to user space.
-          */
-         while (io_size && (retval == 0)) {
-           upl_f_offset = uio->uio_offset;
+               upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
+               upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
 
-           /*
-            * If this call fails, it means the page is not
-            * in the page cache.
-            */
-           if (ubc_page_op(vp, upl_f_offset,
-                           UPL_POP_SET | UPL_POP_BUSY, &paddr, 0) != KERN_SUCCESS)
-             break;
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
+                            (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
 
-           retval = uiomove((caddr_t)(paddr), PAGE_SIZE, uio);
-                               
-           ubc_page_op(vp, upl_f_offset, 
-                       UPL_POP_CLR | UPL_POP_BUSY, 0, 0);
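+               /*
+                * a page aligned, page-multiple transfer has no partial
+                * pages to zero-fill, so the UPL can be created with
+                * UPL_NOZEROFILL... if such a UPL has to be aborted, dump
+                * its pages rather than leaving never-zero-filled pages
+                * in place
+                */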
+               if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0)) {
+                       no_zero_fill = 1;
+                       abort_flag = UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY;
+               } else {
+                       no_zero_fill = 0;
+                       abort_flag = UPL_ABORT_FREE_ON_EMPTY;
+               }
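+               /*
+                * create a UPL describing the user's buffer so its pages
+                * can be wired for the transfer... if any page in the
+                * returned pagelist isn't valid, abort and retry (up to 3
+                * times) with UPL_FORCE_DATA_SYNC set before giving up
+                */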
+               for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
+                       pages_in_pl = 0;
+                       upl_size = upl_needed_size;
+                       upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
+
+                       if (no_zero_fill)
+                               upl_flags |= UPL_NOZEROFILL;
+                       if (force_data_sync)
+                               upl_flags |= UPL_FORCE_DATA_SYNC;
+
+                       kret = vm_map_create_upl(current_map(),
+                                                (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
+                                                &upl_size, &upl, NULL, &pages_in_pl, &upl_flags);
+
+                       if (kret != KERN_SUCCESS) {
+                               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
+                                            (int)upl_offset, upl_size, io_size, kret, 0);
+                               /*
+                                * failed to get pagelist
+                                *
+                                * we may have already spun some portion of this request
+                                * off as async requests... we need to wait for the I/O
+                                * to complete before returning
+                                */
+                               goto wait_for_dreads;
+                       }
+                       pages_in_pl = upl_size / PAGE_SIZE;
+                       pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
+
+                       for (i = 0; i < pages_in_pl; i++) {
+                               if (!upl_valid_page(pl, i))
+                                       break;            
+                       }
+                       if (i == pages_in_pl)
+                               break;
+
+                       ubc_upl_abort(upl, abort_flag);
+               }
+               if (force_data_sync >= 3) {
+                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
+                                    (int)upl_offset, upl_size, io_size, kret, 0);
                  
-           io_size     -= PAGE_SIZE;
-           KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 71)) | DBG_FUNC_NONE,
-                          (int)uio->uio_offset, io_size, uio->uio_resid, 0, 0);
-         }
+                       goto wait_for_dreads;
+               }
+               /*
+                * Consider the possibility that upl_size wasn't satisfied.
+                */
+               if (upl_size < upl_needed_size) {
+                       if (upl_size && upl_offset == 0)
+                               io_size = upl_size;
+                       else
+                               io_size = 0;
+               }
+               if (io_size == 0) {
+                       ubc_upl_abort(upl, abort_flag);
+                       goto wait_for_dreads;
+               }
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
+                            (int)upl_offset, upl_size, io_size, kret, 0);
+
+               /*
+                * request asynchronously so that we can overlap
+                * the preparation of the next I/O
+                * if there are already too many outstanding reads
+                * wait until some have completed before issuing the next read
+                */
+               lck_mtx_lock(cl_mtxp);
 
-         uio->uio_segflg = segflg;
+               while ((iostate.io_issued - iostate.io_completed) > max_rd_ahead) {
+                       iostate.io_wanted = 1;
+                       msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_direct", NULL);
+               }       
+               lck_mtx_unlock(cl_mtxp);
                        
-         if (retval)
-           {
-             KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
-                          (int)uio->uio_offset, uio->uio_resid, 2, retval, 0);       
-             return(retval);
-           }
-
-         /* If we are already finished with this read, then return */
-         if (io_size == 0)
-           {
-
-             KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
-                          (int)uio->uio_offset, uio->uio_resid, 3, io_size, 0);
-             return(0);
-           }
-
-         max_io_size = io_size;
-         if (max_io_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
-           max_io_size = MAX_UPL_TRANSFER * PAGE_SIZE;
-
-         start_upl_f_offset = uio->uio_offset;   /* this is page aligned in the file */
-         upl_f_offset = start_upl_f_offset;
-         io_size = 0;
-
-         while(io_size < max_io_size)
-           {
-
-             if(ubc_page_op(vp, upl_f_offset,
-                               UPL_POP_SET | UPL_POP_BUSY, &paddr, 0) == KERN_SUCCESS)
-             {
-                       ubc_page_op(vp, upl_f_offset,
-                           UPL_POP_CLR | UPL_POP_BUSY, 0, 0);
-                       break;
-             }
+               if (iostate.io_error) {
+                       /*
+                        * one of the earlier reads we issued ran into a hard error
+                        * don't issue any more reads, cleanup the UPL
+                        * that was just created but not used, then
+                        * go wait for any other reads to complete before
+                        * returning the error to the caller
+                        */
+                       ubc_upl_abort(upl, abort_flag);
 
-                 /*
-                  * Build up the io request parameters.
-                  */
+                       goto wait_for_dreads;
+               }
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START,
+                            (int)upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0);
 
-                 io_size += PAGE_SIZE;
-                 upl_f_offset += PAGE_SIZE;
-               }
+               if (no_zero_fill)
+                       io_flag = CL_COMMIT | CL_READ | CL_ASYNC | CL_NOZERO | CL_DIRECT_IO | bflag;
+               else
+                       io_flag = CL_COMMIT | CL_READ | CL_ASYNC | CL_NOZERO | CL_DIRECT_IO | CL_PRESERVE | bflag;
 
-             if (io_size == 0)
-               return(retval);
+               retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
 
-         upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK_64;
-         upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
+               /*
+                * update the uio structure
+                */
+               uio_update(uio, (user_size_t)io_size);
 
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
-                      (int)upl_offset, upl_needed_size, (int)iov->iov_base, io_size, 0);
+               io_req_size -= io_size;
 
-         for (force_data_sync = 0; force_data_sync < 3; force_data_sync++)
-           {
-             pages_in_pl = 0;
-             upl_size = upl_needed_size;
-             upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL;
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END,
+                            (int)upl, (int)uio->uio_offset, io_req_size, retval, 0);
 
-             kret = vm_map_get_upl(current_map(),
-                                   (vm_offset_t)iov->iov_base & ~PAGE_MASK,
-                                   &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, force_data_sync);
+       } /* end while */
 
-             if (kret != KERN_SUCCESS)
-               {
-                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
-                              (int)upl_offset, upl_size, io_size, kret, 0);
-                 
-                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
-                              (int)uio->uio_offset, uio->uio_resid, 4, retval, 0);
+       if (retval == 0 && iostate.io_error == 0 && io_req_size == 0 && uio->uio_offset < filesize) {
 
-                 /* cluster_nocopy_read: failed to get pagelist */
-                 /* do not return kret here */
-                 return(retval);
-               }
+               retval = cluster_io_type(uio, read_type, read_length, 0);
+         
+               if (retval == 0 && *read_type == IO_DIRECT) {
 
-             pages_in_pl = upl_size / PAGE_SIZE;
-             pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
+                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
+                                    (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
 
-             for(i=0; i < pages_in_pl; i++)
-               {
-                 if (!upl_valid_page(pl, i))
-                   break;                
+                       goto next_dread;
                }
-             if (i == pages_in_pl)
-               break;
+       }
 
-             ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
-                                 UPL_ABORT_FREE_ON_EMPTY);
-           }
+wait_for_dreads:
+       if (iostate.io_issued) {
+               /*
+                * make sure all async reads that are part of this stream
+                * have completed before we return
+                */
+               lck_mtx_lock(cl_mtxp);
 
-         if (force_data_sync >= 3)
-           {
-                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
-                              (int)upl_offset, upl_size, io_size, kret, 0);
-                 
-                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
-                              (int)uio->uio_offset, uio->uio_resid, 5, retval, 0);
-             return(retval);
-           }
-         /*
-          * Consider the possibility that upl_size wasn't satisfied.
-          */
-         if (upl_size != upl_needed_size)
-           io_size = (upl_size - (int)upl_offset) & ~PAGE_MASK;
-
-         if (io_size == 0)
-           {
-             ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
-                                  UPL_ABORT_FREE_ON_EMPTY);
-             return(retval);
-           }
-
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
-                      (int)upl_offset, upl_size, io_size, kret, 0);
-
-         /*
-          * issue a synchronous read to cluster_io
-          */
-
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START,
-                      (int)upl, (int)upl_offset, (int)start_upl_f_offset, io_size, 0);
-
-         error = cluster_io(vp, upl, upl_offset, start_upl_f_offset,
-                            io_size, devblocksize, CL_READ| CL_NOZERO, (struct buf *)0);
-
-         if (error == 0) {
-           /*
-            * The cluster_io read completed successfully,
-            * update the uio structure and commit.
-            */
-
-           ubc_upl_commit_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
-                                       UPL_COMMIT_SET_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
-           
-           iov->iov_base += io_size;
-           iov->iov_len -= io_size;
-           uio->uio_resid -= io_size;
-           uio->uio_offset += io_size;
-         }
-         else {
-           ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, 
-                                  UPL_ABORT_FREE_ON_EMPTY);
-         }
-
-         KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END,
-                      (int)upl, (int)uio->uio_offset, (int)uio->uio_resid, error, 0);
-
-         if (retval == 0)
-           retval = error;
+               while (iostate.io_issued != iostate.io_completed) {
+                       iostate.io_wanted = 1;
+                       msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_direct", NULL);
+               }       
+               lck_mtx_unlock(cl_mtxp);
+       }
 
-       } /* end while */
+       if (iostate.io_error)
+               retval = iostate.io_error;
 
+       if (io_req_size && retval == 0) {
+               /*
+                * we couldn't handle the tail of this request in DIRECT mode
+                * so fire it through the copy path
+                */
+               retval = cluster_read_copy(vp, uio, io_req_size, filesize, flags, callback, callback_arg);
 
+               *read_type = IO_UNKNOWN;
+       }
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
-                    (int)uio->uio_offset, (int)uio->uio_resid, 6, retval, 0);
+                    (int)uio->uio_offset, (int)uio_resid(uio), io_req_size, retval, 0);
 
        return (retval);
 }
 
 
 static int
-cluster_phys_read(vp, uio, filesize)
-       struct vnode *vp;
-       struct uio   *uio;
-       off_t        filesize;
+cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
+                   int (*callback)(buf_t, void *), void *callback_arg, int flags)
 {
-       upl_t            upl;
+       upl_page_info_t *pl;
+       upl_t            upl[MAX_VECTS];
        vm_offset_t      upl_offset;
+       addr64_t         dst_paddr = 0;
+       user_addr_t      iov_base;
        off_t            max_size;
-       int              io_size;
-       int              upl_size;
-       int              upl_needed_size;
-       int              pages_in_pl;
+       vm_size_t        upl_size;
+       vm_size_t        upl_needed_size;
+       mach_msg_type_number_t  pages_in_pl;
        int              upl_flags;
        kern_return_t    kret;
-       struct iovec     *iov;
-       int              error;
+       struct clios     iostate;
+       int              error = 0;
+       int              cur_upl = 0;
+       int              num_upl = 0;
+       int              n;
+        u_int32_t       xsize;
+       u_int32_t        io_size;
+       u_int32_t        devblocksize;
+       u_int32_t        mem_alignment_mask;
+       u_int32_t        tail_size = 0;
+       int              bflag;
+
+       if (flags & IO_PASSIVE)
+           bflag = CL_PASSIVE;
+       else
+           bflag = 0;
 
        /*
         * When we enter this routine, we know
-        *  -- the resid will not exceed iov_len
-        *  -- the target address is physically contiguous
+        *  -- the read_length will not exceed the current iov_len
+        *  -- the target address is physically contiguous for read_length
         */
+       cluster_syncup(vp, filesize, callback, callback_arg);
 
-       iov = uio->uio_iov;
+       devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
+       mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
+
+       iostate.io_completed = 0;
+       iostate.io_issued = 0;
+       iostate.io_error = 0;
+       iostate.io_wanted = 0;
+
+next_cread:
+       io_size = *read_length;
 
        max_size = filesize - uio->uio_offset;
 
-       if (max_size < (off_t)((unsigned int)iov->iov_len))
-           io_size = max_size;
-       else
-           io_size = iov->iov_len;
+       if (io_size > max_size)
+               io_size = max_size;
 
-       upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK_64;
+       iov_base = uio_curriovbase(uio);
+
+       upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
        upl_needed_size = upl_offset + io_size;
 
        pages_in_pl = 0;
        upl_size = upl_needed_size;
-       upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL;
+       upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
 
-       kret = vm_map_get_upl(current_map(),
-                             (vm_offset_t)iov->iov_base & ~PAGE_MASK,
-                             &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, 0);
 
-       if (kret != KERN_SUCCESS)
-         {
-           /* cluster_phys_read: failed to get pagelist */
-           return(EINVAL);
-         }
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_START,
+                    (int)upl_offset, (int)upl_size, (int)iov_base, io_size, 0);
 
-       /*
-        * Consider the possibility that upl_size wasn't satisfied.
-        */
-       if (upl_size < upl_needed_size)
-         {
-           ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
-           return(EINVAL);
-         }
+       kret = vm_map_get_upl(current_map(),
+                             (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
+                             &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);
 
-       /*
-        * issue a synchronous read to cluster_io
-        */
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_END,
+                    (int)upl_offset, upl_size, io_size, kret, 0);
+
+       if (kret != KERN_SUCCESS) {
+               /*
+                * failed to get pagelist
+                */
+               error = EINVAL;
+               goto wait_for_creads;
+       }
+       num_upl++;
+
+       if (upl_size < upl_needed_size) {
+               /*
+                * The upl_size wasn't satisfied.
+                */
+               error = EINVAL;
+               goto wait_for_creads;
+       }
+       pl = ubc_upl_pageinfo(upl[cur_upl]);
+
+       dst_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)upl_offset;
+
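+       /*
+        * if the file offset isn't on a device block boundary, or less
+        * than a device block remains, transfer the misaligned 'head'
+        * through cluster_align_phys_io until the offset is aligned for
+        * the main CL_DEV_MEMORY transfer below
+        */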
+       while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
+               u_int32_t   head_size;
+
+               head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));
+
+               if (head_size > io_size)
+                       head_size = io_size;
+
+               error = cluster_align_phys_io(vp, uio, dst_paddr, head_size, CL_READ, callback, callback_arg);
+
+               if (error)
+                       goto wait_for_creads;
+
+               upl_offset += head_size;
+               dst_paddr  += head_size;
+               io_size    -= head_size;
+
+               iov_base   += head_size;
+       }
+       if ((u_int32_t)iov_base & mem_alignment_mask) {
+               /*
+                * the request isn't aligned to a memory boundary
+                * that the underlying DMA engine can handle...
+                * return an error instead of going through
+                * the slow copy path since the intent of this
+                * path is direct I/O to device memory
+                */
+               error = EINVAL;
+               goto wait_for_creads;
+       }
+
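+       /*
+        * carve off any partial device block at the end of the request...
+        * this 'tail' is transferred via cluster_align_phys_io after all
+        * of the async reads issued below have completed
+        */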
+       tail_size = io_size & (devblocksize - 1);
+
+       io_size  -= tail_size;
+
+       while (io_size && error == 0) {
+
+               if (io_size > MAX_IO_CONTIG_SIZE)
+                       xsize = MAX_IO_CONTIG_SIZE;
+               else
+                       xsize = io_size;
+               /*
+                * request asynchronously so that we can overlap
+                * the preparation of the next I/O... we'll do
+                * the commit after all the I/O has completed
+                * since its all issued against the same UPL
+                * if there are already too many outstanding reads
+                * wait until some have completed before issuing the next
+                */
+               if (iostate.io_issued) {
+                       lck_mtx_lock(cl_mtxp);
+
+                       while ((iostate.io_issued - iostate.io_completed) > (3 * MAX_IO_CONTIG_SIZE)) {
+                               iostate.io_wanted = 1;
+                               msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_contig", NULL);
+                       }       
+                       lck_mtx_unlock(cl_mtxp);
+               }
+               if (iostate.io_error) {
+                       /*
+                        * one of the earlier reads we issued ran into a hard error
+                        * don't issue any more reads...
+                        * go wait for any other reads to complete before
+                        * returning the error to the caller
+                        */
+                       goto wait_for_creads;
+               }
+               error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset, xsize, 
+                                  CL_READ | CL_NOZERO | CL_DEV_MEMORY | CL_ASYNC | bflag,
+                                  (buf_t)NULL, &iostate, callback, callback_arg);
+               /*
+                * The cluster_io read was issued successfully,
+                * update the uio structure
+                */
+               if (error == 0) {
+                       uio_update(uio, (user_size_t)xsize);
+
+                       dst_paddr  += xsize;
+                       upl_offset += xsize;
+                       io_size    -= xsize;
+               }
+       }
+       if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS && uio->uio_offset < filesize) {
+
+               error = cluster_io_type(uio, read_type, read_length, 0);
+         
+               if (error == 0 && *read_type == IO_CONTIG) {
+                       cur_upl++;
+                       goto next_cread;
+               }
+       } else
+               *read_type = IO_UNKNOWN;
+
+wait_for_creads:
+       /*
+        * make sure all async reads that are part of this stream
+        * have completed before we proceed
+        */
+       lck_mtx_lock(cl_mtxp);
+
+       while (iostate.io_issued != iostate.io_completed) {
+               iostate.io_wanted = 1;
+               msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_contig", NULL);
+       }       
+       lck_mtx_unlock(cl_mtxp);
+
+       if (iostate.io_error)
+               error = iostate.io_error;
+
+       if (error == 0 && tail_size)
+               error = cluster_align_phys_io(vp, uio, dst_paddr, tail_size, CL_READ, callback, callback_arg);
+
+       for (n = 0; n < num_upl; n++)
+               /*
+                * just release our hold on each physically contiguous
+                * region without changing any state
+                */
+               ubc_upl_abort(upl[n], 0);
+       
+       return (error);
+}
+
+
+static int
+cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length)
+{
+        user_size_t     iov_len;
+       user_addr_t      iov_base = 0;
+       upl_t            upl;
+       vm_size_t        upl_size;
+       int              upl_flags;
+       int              retval = 0;
+
+        /*
+        * skip over any empty vectors
+        */
+        uio_update(uio, (user_size_t)0);
+
+       iov_len = uio_curriovlen(uio);
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_START, (int)uio, (int)iov_len, 0, 0, 0);
+
+       if (iov_len) {
+               iov_base = uio_curriovbase(uio);
+               /*
+                * make sure the size of the vector isn't too big...
+                * internally, we want to handle all of the I/O in
+                * chunk sizes that fit in a 32 bit int
+                */
+               if (iov_len > (user_size_t)MAX_IO_REQUEST_SIZE)
+                       upl_size = MAX_IO_REQUEST_SIZE;
+               else
+                       upl_size = (u_int32_t)iov_len;
+
+               upl_flags = UPL_QUERY_OBJECT_TYPE;
+  
+               if ((vm_map_get_upl(current_map(),
+                                   (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
+                                   &upl_size, &upl, NULL, NULL, &upl_flags, 0)) != KERN_SUCCESS) {
+                       /*
+                        * the user app must have passed in an invalid address
+                        */
+                       retval = EFAULT;
+               }
+               if (upl_size == 0)
+                       retval = EFAULT;
+
+               *io_length = upl_size;
+
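+               /*
+                * classify this vector... physically contiguous memory
+                * takes the CONTIG path, vectors of at least min_length
+                * take the DIRECT path, and anything smaller falls back
+                * to the cached COPY path
+                */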
+               if (upl_flags & UPL_PHYS_CONTIG)
+                       *io_type = IO_CONTIG;
+               else if (iov_len >= min_length)
+                       *io_type = IO_DIRECT;
+               else
+                       *io_type = IO_COPY;
+       } else {
+               /*
+                * nothing left to do for this uio
+                */
+               *io_length = 0;
+               *io_type   = IO_UNKNOWN;
+       }
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_END, (int)iov_base, *io_type, *io_length, retval, 0);
+
+       return (retval);
+}
 
-       error = cluster_io(vp, upl, upl_offset, uio->uio_offset,
-                          io_size, 0, CL_READ| CL_NOZERO | CL_DEV_MEMORY, (struct buf *)0);
-
-       if (error == 0)
-         {
-           /*
-            * The cluster_io read completed successfully,
-            * update the uio structure and commit.
-            */
-
-           ubc_upl_commit_range(upl, 0, upl_size, UPL_COMMIT_FREE_ON_EMPTY);
-           
-           iov->iov_base += io_size;
-           iov->iov_len -= io_size;
-           uio->uio_resid -= io_size;
-           uio->uio_offset += io_size;
-         }
-       else
-           ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
-       
-       return (error);
-}
 
 /*
  * generate advisory I/O's in the largest chunks possible
  * the completed pages will be released into the VM cache
  */
 int
-advisory_read(vp, filesize, f_offset, resid, devblocksize)
-       struct vnode *vp;
-       off_t         filesize;
-       off_t         f_offset;
-       int           resid;
-       int           devblocksize;
+advisory_read(vnode_t vp, off_t filesize, off_t f_offset, int resid)
+{
+        return advisory_read_ext(vp, filesize, f_offset, resid, NULL, NULL, CL_PASSIVE);
+}
+
+int
+advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
 {
        upl_page_info_t *pl;
        upl_t            upl;
@@ -2836,12 +4191,13 @@ advisory_read(vp, filesize, f_offset, resid, devblocksize)
        kern_return_t    kret;
        int              retval = 0;
        int              issued_io;
+       int              skip_range;
 
-       if (!UBCINFOEXISTS(vp))
+       if ( !UBCINFOEXISTS(vp))
                return(EINVAL);
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_START,
-                    (int)f_offset, resid, (int)filesize, devblocksize, 0);
+                    (int)f_offset, resid, (int)filesize, 0, 0);
 
        while (resid && f_offset < filesize && retval == 0) {
                /*
@@ -2864,14 +4220,45 @@ advisory_read(vp, filesize, f_offset, resid, devblocksize)
                upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
                if (upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
                        upl_size = MAX_UPL_TRANSFER * PAGE_SIZE;
+
+               skip_range = 0;
+               /*
+                * return the number of contiguously present pages in the cache
+                * starting at upl_f_offset within the file
+                */
+               ubc_range_op(vp, upl_f_offset, upl_f_offset + upl_size, UPL_ROP_PRESENT, &skip_range);
+
+               if (skip_range) {
+                       /*
+                        * skip over pages already present in the cache
+                        */
+                       io_size = skip_range - start_offset;
+
+                       f_offset += io_size;
+                       resid    -= io_size;
+
+                       if (skip_range == upl_size)
+                               continue;
+                       /*
+                        * have to issue some real I/O
+                        * at this point, we know it's starting on a page boundary
+                        * because we've skipped over at least the first page in the request
+                        */
+                       start_offset = 0;
+                       upl_f_offset += skip_range;
+                       upl_size     -= skip_range;
+               }
                pages_in_upl = upl_size / PAGE_SIZE;
 
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_START,
+                            (int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
+
                kret = ubc_create_upl(vp, 
-                                               upl_f_offset,
-                                               upl_size,
-                                               &upl,
-                                               &pl,
-                                               UPL_RET_ONLY_ABSENT);
+                                     upl_f_offset,
+                                     upl_size,
+                                     &upl,
+                                     &pl,
+                                     UPL_RET_ONLY_ABSENT | UPL_SET_LITE);
                if (kret != KERN_SUCCESS)
                        return(retval);
                issued_io = 0;
@@ -2888,7 +4275,7 @@ advisory_read(vp, filesize, f_offset, resid, devblocksize)
                pages_in_upl = last_pg + 1;
 
 
-               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_NONE,
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_END,
                             (int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
 
 
@@ -2931,8 +4318,8 @@ advisory_read(vp, filesize, f_offset, resid, devblocksize)
                                /*
                                 * issue an asynchronous read to cluster_io
                                 */
-                               retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size, devblocksize,
-                                                   CL_ASYNC | CL_READ | CL_COMMIT | CL_AGE, (struct buf *)0);
+                               retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
+                                                   CL_ASYNC | CL_READ | CL_COMMIT | CL_AGE | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
 
                                issued_io = 1;
                        }
@@ -2956,171 +4343,280 @@ advisory_read(vp, filesize, f_offset, resid, devblocksize)
 
 
 int
-cluster_push(vp)
-        struct vnode *vp;
+cluster_push(vnode_t vp, int flags)
 {
-        int  retval;
+        return cluster_push_ext(vp, flags, NULL, NULL);
+}
 
-       if (!UBCINFOEXISTS(vp) || vp->v_clen == 0) {
-               vp->v_flag &= ~VHASDIRTY;
-               return(0);
+
+int
+cluster_push_ext(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg)
+{
+        int    retval;
+       struct  cl_writebehind *wbp;
+
+       if ( !UBCINFOEXISTS(vp)) {
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, (int)vp, flags, 0, -1, 0);
+               return (0);
        }
+       /* return if deferred write is set */
+       if (((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE) && (flags & IO_DEFWRITE)) {
+               return (0);
+       }
+       if ((wbp = cluster_get_wbp(vp, CLW_RETURNLOCKED)) == NULL) {
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, (int)vp, flags, 0, -2, 0);
+               return (0);
+       }
+       if (wbp->cl_number == 0 && wbp->cl_scmap == NULL) {
+               lck_mtx_unlock(&wbp->cl_lockw);
 
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, (int)vp, flags, 0, -3, 0);
+               return(0);
+       }
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_START,
-                    vp->v_flag & VHASDIRTY, vp->v_clen, 0, 0, 0);
+                    (int)wbp->cl_scmap, wbp->cl_number, flags, 0, 0);
+
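+       /*
+        * if a sparse cluster map exists, this vnode has been handling
+        * non-sequential dirty data... push everything the map describes;
+        * otherwise push the delayed-write clusters via cluster_try_push
+        */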
+       if (wbp->cl_scmap) {
+               sparse_cluster_push(wbp, vp, ubc_getsize(vp), PUSH_ALL | IO_PASSIVE, callback, callback_arg);
+
+               retval = 1;
+       } else 
+               retval = cluster_try_push(wbp, vp, ubc_getsize(vp), PUSH_ALL | IO_PASSIVE, callback, callback_arg);
+
+       lck_mtx_unlock(&wbp->cl_lockw);
 
-       if (vp->v_flag & VHASDIRTY) {
-               daddr_t start_pg;
-               daddr_t last_pg;
-               daddr_t end_pg;
+       if (flags & IO_SYNC)
+               (void)vnode_waitforwrites(vp, 0, 0, 0, "cluster_push");
 
-               start_pg = vp->v_cstart;
-               end_pg   = vp->v_lastw;
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_END,
+                    (int)wbp->cl_scmap, wbp->cl_number, retval, 0, 0);
+
+       return (retval);
+}
 
-               vp->v_flag &= ~VHASDIRTY;
-               vp->v_clen = 0;
 
-               while (start_pg < end_pg) {
-                       last_pg = start_pg + MAX_UPL_TRANSFER;
+__private_extern__ void
+cluster_release(struct ubc_info *ubc)
+{
+        struct cl_writebehind *wbp;
+       struct cl_readahead   *rap;
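+       /*
+        * tear down the cluster state hanging off this ubc_info...
+        * release any sparse cluster map, then destroy the locks and
+        * free the write-behind and read-ahead contexts
+        */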
 
-                       if (last_pg > end_pg)
-                               last_pg = end_pg;
+       if ((wbp = ubc->cl_wbehind)) {
 
-                       cluster_push_x(vp, ubc_getsize(vp), start_pg, last_pg, 0);
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, (int)ubc, (int)wbp->cl_scmap, wbp->cl_scdirty, 0, 0);
 
-                       start_pg = last_pg;
-               }
-               return (1);
+               if (wbp->cl_scmap)
+                       vfs_drt_control(&(wbp->cl_scmap), 0);
+       } else {
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, (int)ubc, 0, 0, 0, 0);
        }
-       retval = cluster_try_push(vp, ubc_getsize(vp), 0, 1);
 
-       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_END,
-                    vp->v_flag & VHASDIRTY, vp->v_clen, retval, 0, 0);
+       rap = ubc->cl_rahead;
 
-       return (retval);
+       if (wbp != NULL) {
+               lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp);
+               FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND);
+       }
+       if ((rap = ubc->cl_rahead)) {
+               lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp);
+               FREE_ZONE((void *)rap, sizeof *rap, M_CLRDAHEAD);
+       }
+       ubc->cl_rahead  = NULL;
+       ubc->cl_wbehind = NULL;
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_END, (int)ubc, (int)rap, (int)wbp, 0, 0);
 }
 
 
 static int
-cluster_try_push(vp, EOF, can_delay, push_all)
-        struct vnode *vp;
-       off_t  EOF;
-       int    can_delay;
-       int    push_all;
+cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_flag, int (*callback)(buf_t, void *), void *callback_arg)
 {
         int cl_index;
        int cl_index1;
        int min_index;
         int cl_len;
-       int cl_total;
-       int cl_pushed;
-       struct v_cluster l_clusters[MAX_CLUSTERS];
+       int cl_pushed = 0;
+       struct cl_wextent l_clusters[MAX_CLUSTERS];
 
+       /*
+        * the write behind context exists and has
+        * already been locked...
+        */
+       if (wbp->cl_number == 0)
+               /*
+                * no clusters to push
+                * return number of empty slots
+                */
+               return (MAX_CLUSTERS);
+        
        /*
         * make a local 'sorted' copy of the clusters
-        * and clear vp->v_clen so that new clusters can
+        * and clear wbp->cl_number so that new clusters can
         * be developed
         */
-       for (cl_index = 0; cl_index < vp->v_clen; cl_index++) {
-               for (min_index = -1, cl_index1 = 0; cl_index1 < vp->v_clen; cl_index1++) {
-                       if (vp->v_clusters[cl_index1].start_pg == vp->v_clusters[cl_index1].last_pg)
+       for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
+               for (min_index = -1, cl_index1 = 0; cl_index1 < wbp->cl_number; cl_index1++) {
+                       if (wbp->cl_clusters[cl_index1].b_addr == wbp->cl_clusters[cl_index1].e_addr)
                                continue;
                        if (min_index == -1)
                                min_index = cl_index1;
-                       else if (vp->v_clusters[cl_index1].start_pg < vp->v_clusters[min_index].start_pg)
+                       else if (wbp->cl_clusters[cl_index1].b_addr < wbp->cl_clusters[min_index].b_addr)
                                min_index = cl_index1;
                }
                if (min_index == -1)
                        break;
-               l_clusters[cl_index].start_pg = vp->v_clusters[min_index].start_pg;
-               l_clusters[cl_index].last_pg  = vp->v_clusters[min_index].last_pg;
+               l_clusters[cl_index].b_addr = wbp->cl_clusters[min_index].b_addr;
+               l_clusters[cl_index].e_addr = wbp->cl_clusters[min_index].e_addr;
+               l_clusters[cl_index].io_flags = wbp->cl_clusters[min_index].io_flags;
 
-               vp->v_clusters[min_index].start_pg = vp->v_clusters[min_index].last_pg;
+               wbp->cl_clusters[min_index].b_addr = wbp->cl_clusters[min_index].e_addr;
        }
-       cl_len     = cl_index;
-       vp->v_clen = 0;
+       wbp->cl_number = 0;
+
+       cl_len = cl_index;
+
+       if ( (push_flag & PUSH_DELAY) && cl_len == MAX_CLUSTERS ) {
+               int   i;
+               
+               /*
+                * determine if we appear to be writing the file sequentially
+                * if not, by returning without having pushed any clusters
+                * we will cause this vnode to be pushed into the sparse cluster mechanism
+                * used for managing more random I/O patterns
+                *
+                * we know that we've got all clusters currently in use and the next write doesn't fit into one of them...
+                * that's why we're in try_push with PUSH_DELAY...
+                *
+                * check to make sure that all the clusters except the last one are 'full'... and that each cluster
+                * is adjacent to the next (i.e. we're looking for sequential writes)... they were sorted above
+                * so we can just make a simple pass through, up to, but not including the last one...
+                * note that e_addr is not inclusive, so it will be equal to the b_addr of the next cluster if they
+                * are sequential
+                * 
+                * we let the last one be partial as long as it was adjacent to the previous one...
+                * we need to do this to deal with multi-threaded servers that might write an I/O or 2 out
+                * of order... if this occurs at the tail of the last cluster, we don't want to fall into the sparse cluster world...
+                */
+               for (i = 0; i < MAX_CLUSTERS - 1; i++) {
+                       if ((l_clusters[i].e_addr - l_clusters[i].b_addr) != MAX_CLUSTER_SIZE)
+                               goto dont_try;
+                       if (l_clusters[i].e_addr != l_clusters[i+1].b_addr)
+                               goto dont_try;
+               }
+       }
+       /*
+        * drop the lock while we're firing off the I/Os...
+        * this is safe since I'm working off of a private sorted copy
+        * of the clusters, and I'm going to re-evaluate the public
+        * state after I retake the lock
+        *
+        * we need to drop it to avoid a lock inversion when trying to
+        * grab pages into the UPL... another thread in 'write' may
+        * have these pages in its UPL and be blocked trying to
+        * gain the write-behind lock for this vnode
+        */
+       lck_mtx_unlock(&wbp->cl_lockw);
+
+       for (cl_index = 0; cl_index < cl_len; cl_index++) {
+               int     flags;
+               struct  cl_extent cl;
 
-       for (cl_pushed = 0, cl_index = 0; cl_index < cl_len; cl_index++) {
                /*
-                * try to push each cluster in turn...  cluster_push_x may not
-                * push the cluster if can_delay is TRUE and the cluster doesn't
-                * meet the critera for an immediate push
+                * try to push each cluster in turn...
                 */
-               if (cluster_push_x(vp, EOF, l_clusters[cl_index].start_pg, l_clusters[cl_index].last_pg, can_delay)) {
-                       l_clusters[cl_index].start_pg = 0;
-                       l_clusters[cl_index].last_pg  = 0;
+               if (l_clusters[cl_index].io_flags & CLW_IONOCACHE)
+                       flags = IO_NOCACHE;
+               else
+                       flags = 0;
 
-                       cl_pushed++;
+               if ((l_clusters[cl_index].io_flags & CLW_IOPASSIVE) || (push_flag & IO_PASSIVE))
+                       flags |= IO_PASSIVE;
 
-                       if (push_all == 0)
-                               break;
-               }
+               if (push_flag & PUSH_SYNC)
+                       flags |= IO_SYNC;
+
+               cl.b_addr = l_clusters[cl_index].b_addr;
+               cl.e_addr = l_clusters[cl_index].e_addr;
+
+               cluster_push_now(vp, &cl, EOF, flags, callback, callback_arg);
+
+               l_clusters[cl_index].b_addr = 0;
+               l_clusters[cl_index].e_addr = 0;
+
+               cl_pushed++;
+
+               if ( !(push_flag & PUSH_ALL) )
+                       break;
        }
+       lck_mtx_lock(&wbp->cl_lockw);
+
+dont_try:
        if (cl_len > cl_pushed) {
               /*
                * we didn't push all of the clusters, so
                * lets try to merge them back in to the vnode
                */
-               if ((MAX_CLUSTERS - vp->v_clen) < (cl_len - cl_pushed)) {
+               if ((MAX_CLUSTERS - wbp->cl_number) < (cl_len - cl_pushed)) {
                        /*
                         * we picked up some new clusters while we were trying to
-                        * push the old ones (I don't think this can happen because
-                        * I'm holding the lock, but just in case)... the sum of the
+                        * push the old ones... this can happen because I've dropped
+                        * the write-behind lock while issuing the I/O... the sum of the
                         * leftovers plus the new cluster count exceeds our ability
-                        * to represent them, so fall back to the VHASDIRTY mechanism
+                        * to represent them, so switch to the sparse cluster mechanism
+                        *
+                        * collect the active public clusters...
                         */
-                       for (cl_index = 0; cl_index < cl_len; cl_index++) {
-                               if (l_clusters[cl_index].start_pg == l_clusters[cl_index].last_pg)
+                       sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg);
+
+                       for (cl_index = 0, cl_index1 = 0; cl_index < cl_len; cl_index++) {
+                               if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr)
                                        continue;
+                               wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
+                               wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
+                               wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
 
-                               if (l_clusters[cl_index].start_pg < vp->v_cstart)
-                                       vp->v_cstart = l_clusters[cl_index].start_pg;
-                               if (l_clusters[cl_index].last_pg > vp->v_lastw)
-                                       vp->v_lastw = l_clusters[cl_index].last_pg;
+                               cl_index1++;
                        }
-                       vp->v_flag |= VHASDIRTY;
+                       /*
+                        * update the cluster count
+                        */
+                       wbp->cl_number = cl_index1;
+
+                       /*
+                        * and collect the original clusters that were moved into the 
+                        * local storage for sorting purposes
+                        */
+                       sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg);
+
                } else {
                        /*
                         * we've got room to merge the leftovers back in
                         * just append them starting at the next 'hole'
-                        * represented by vp->v_clen
+                        * represented by wbp->cl_number
                         */
-                       for (cl_index = 0, cl_index1 = vp->v_clen; cl_index < cl_len; cl_index++) {
-                               if (l_clusters[cl_index].start_pg == l_clusters[cl_index].last_pg)
+                       for (cl_index = 0, cl_index1 = wbp->cl_number; cl_index < cl_len; cl_index++) {
+                               if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr)
                                        continue;
 
-                               vp->v_clusters[cl_index1].start_pg = l_clusters[cl_index].start_pg;
-                               vp->v_clusters[cl_index1].last_pg  = l_clusters[cl_index].last_pg;
+                               wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
+                               wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
+                               wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
 
-                               if (cl_index1 == 0) {
-                                       vp->v_cstart = l_clusters[cl_index].start_pg;
-                                       vp->v_lastw  = l_clusters[cl_index].last_pg;
-                               } else {
-                                       if (l_clusters[cl_index].start_pg < vp->v_cstart)
-                                               vp->v_cstart = l_clusters[cl_index].start_pg;
-                                       if (l_clusters[cl_index].last_pg > vp->v_lastw)
-                                               vp->v_lastw = l_clusters[cl_index].last_pg;
-                               }
                                cl_index1++;
                        }
                        /*
                         * update the cluster count
                         */
-                       vp->v_clen = cl_index1;
+                       wbp->cl_number = cl_index1;
                }
        }
-       return(MAX_CLUSTERS - vp->v_clen);
+       return (MAX_CLUSTERS - wbp->cl_number);
 }
 
 
 
 static int
-cluster_push_x(vp, EOF, first, last, can_delay)
-        struct vnode *vp;
-       off_t  EOF;
-       daddr_t first;
-       daddr_t last;
-       int    can_delay;
+cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags, int (*callback)(buf_t, void *), void *callback_arg)
 {
        upl_page_info_t *pl;
        upl_t            upl;
@@ -3132,20 +4628,28 @@ cluster_push_x(vp, EOF, first, last, can_delay)
        int              last_pg;
        int              io_size;
        int              io_flags;
+       int              upl_flags;
+       int              bflag;
        int              size;
+       int              error = 0;
+       int              retval;
        kern_return_t    kret;
 
+       if (flags & IO_PASSIVE)
+           bflag = CL_PASSIVE;
+       else
+           bflag = 0;
 
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_START,
-                    vp->v_clen, first, last, EOF, 0);
+                    (int)cl->b_addr, (int)cl->e_addr, (int)EOF, flags, 0);
 
-       if ((pages_in_upl = last - first) == 0) {
+       if ((pages_in_upl = (int)(cl->e_addr - cl->b_addr)) == 0) {
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 0, 0, 0, 0);
 
-               return (1);
+               return (0);
        }
        upl_size = pages_in_upl * PAGE_SIZE;
-       upl_f_offset = ((off_t)first) * PAGE_SIZE_64;
+       upl_f_offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
 
        if (upl_f_offset + upl_size >= EOF) {
 
@@ -3157,82 +4661,1234 @@ cluster_push_x(vp, EOF, first, last, can_delay)
                         */
                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 1, 0, 0, 0);
 
-                       return(1);
+                       return(0);
                }
                size = EOF - upl_f_offset;
 
-               upl_size = (size + (PAGE_SIZE - 1) ) & ~(PAGE_SIZE - 1);
+               upl_size = (size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
                pages_in_upl = upl_size / PAGE_SIZE;
-       } else {
-               if (can_delay && (pages_in_upl < (MAX_UPL_TRANSFER - (MAX_UPL_TRANSFER / 2))))
-                       return(0);
+       } else
                size = upl_size;
-       }
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, size, 0, 0, 0);
+
+       /*
+        * by asking for UPL_COPYOUT_FROM and UPL_RET_ONLY_DIRTY, we get the following desirable behavior
+        * 
+        * - only pages that are currently dirty are returned... these are the ones we need to clean
+        * - the hardware dirty bit is cleared when the page is gathered into the UPL... the software dirty bit is set
+        * - if we have to abort the I/O for some reason, the software dirty bit is left set since we didn't clean the page
+        * - when we commit the page, the software dirty bit is cleared... the hardware dirty bit is untouched so that if 
+        *   someone dirties this page while the I/O is in progress, we don't lose track of the new state
+        *
+        * when the I/O completes, we no longer ask for an explicit clear of the DIRTY state (either soft or hard)
+        */
+
+       if ((vp->v_flag & VNOCACHE_DATA) || (flags & IO_NOCACHE))
+               upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE | UPL_WILL_BE_DUMPED;
+       else
+               upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE;
+
        kret = ubc_create_upl(vp, 
                                upl_f_offset,
                                upl_size,
                                &upl,
                                &pl,
-                               UPL_RET_ONLY_DIRTY);
+                               upl_flags);
        if (kret != KERN_SUCCESS)
                panic("cluster_push: failed to get pagelist");
 
-       if (can_delay) {
-               int  num_of_dirty;
-       
-               for (num_of_dirty = 0, start_pg = 0; start_pg < pages_in_upl; start_pg++) {
-                       if (upl_valid_page(pl, start_pg) && upl_dirty_page(pl, start_pg))
-                               num_of_dirty++;
-               }
-               if (num_of_dirty < pages_in_upl / 2) {
-                       ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
-
-                       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 0, 2, num_of_dirty, (pages_in_upl / 2), 0);
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END, (int)upl, upl_f_offset, 0, 0, 0);
 
-                       return(0);
-               }
+       /*
+        * since we only asked for the dirty pages back
+        * it's possible that we may only get a few or even none, so...
+        * before we start marching forward, we must make sure we know
+        * where the last present page is in the UPL, otherwise we could
+        * end up working with a freed upl due to the FREE_ON_EMPTY semantics
+        * employed by commit_range and abort_range.
+        */
+       for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
+               if (upl_page_present(pl, last_pg))
+                       break;
        }
-       last_pg = 0;
+       pages_in_upl = last_pg + 1;
 
-       while (size) {
+       if (pages_in_upl == 0) {
+               ubc_upl_abort(upl, 0);
+
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 2, 0, 0, 0);
+               return(0);
+       }         
 
+       for (last_pg = 0; last_pg < pages_in_upl; ) {
+               /*
+                * find the next dirty page in the UPL
+                * this will become the first page in the 
+                * next I/O to generate
+                */
                for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
-                       if (upl_valid_page(pl, start_pg) && upl_dirty_page(pl, start_pg))
+                       if (upl_dirty_page(pl, start_pg))
                                break;
+                       if (upl_page_present(pl, start_pg))
+                               /*
+                                * RET_ONLY_DIRTY will return non-dirty 'precious' pages
+                                * just release these unchanged since we're not going
+                                * to steal them or change their state
+                                */
+                               ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
                }
-               if (start_pg > last_pg) {
-                       io_size = (start_pg - last_pg) * PAGE_SIZE;
-
-                       ubc_upl_abort_range(upl, last_pg * PAGE_SIZE, io_size,
-                                       UPL_ABORT_FREE_ON_EMPTY);
+               if (start_pg >= pages_in_upl)
+                       /*
+                        * done... no more dirty pages to push
+                        */
+                       break;
+               if (start_pg > last_pg)
+                       /*
+                        * skipped over some non-dirty pages
+                        */
+                       size -= ((start_pg - last_pg) * PAGE_SIZE);
 
-                       if (io_size < size)
-                               size -= io_size;
-                       else
-                               break;
-               }
+               /*
+                * find a range of dirty pages to write
+                */
                for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
-                       if (!upl_valid_page(pl, last_pg) || !upl_dirty_page(pl, last_pg))
+                       if (!upl_dirty_page(pl, last_pg))
                                break;
                }
                upl_offset = start_pg * PAGE_SIZE;
 
                io_size = min(size, (last_pg - start_pg) * PAGE_SIZE);
 
-               if (vp->v_flag & VNOCACHE_DATA)
-                       io_flags = CL_COMMIT | CL_AGE | CL_ASYNC | CL_DUMP;
-               else
-                       io_flags = CL_COMMIT | CL_AGE | CL_ASYNC;
+               io_flags = CL_THROTTLE | CL_COMMIT | CL_AGE | bflag;
 
-               while (vp->v_numoutput >= ASYNC_THROTTLE) {
-                       vp->v_flag |= VTHROTTLED;
-                       tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "cluster_push", 0);
-               }
-               cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size, vp->v_ciosiz, io_flags, (struct buf *)0);
+               if ( !(flags & IO_SYNC))
+                       io_flags |= CL_ASYNC;
+
+               retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
+                                   io_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
+
+               if (error == 0 && retval)
+                       error = retval;
 
                size -= io_size;
        }
        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 3, 0, 0, 0);
 
-       return(1);
+       return(error);
+}
+
+
+/*
+ * sparse_cluster_switch is called with the write behind lock held
+ */
+static void
+sparse_cluster_switch(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg)
+{
+        int    cl_index;
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, (int)vp, (int)wbp->cl_scmap, wbp->cl_scdirty, 0, 0);
+
+       if (wbp->cl_scmap == NULL)
+               wbp->cl_scdirty = 0;
+
+       for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
+               int       flags;
+               struct cl_extent cl;
+
+               for (cl.b_addr = wbp->cl_clusters[cl_index].b_addr; cl.b_addr < wbp->cl_clusters[cl_index].e_addr; cl.b_addr++) {
+
+                       if (ubc_page_op(vp, (off_t)(cl.b_addr * PAGE_SIZE_64), 0, NULL, &flags) == KERN_SUCCESS) {
+                               if (flags & UPL_POP_DIRTY) {
+                                       cl.e_addr = cl.b_addr + 1;
+
+                                       sparse_cluster_add(wbp, vp, &cl, EOF, callback, callback_arg);
+                               }
+                       }
+               }
+       }
+       wbp->cl_number = 0;
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_END, (int)vp, (int)wbp->cl_scmap, wbp->cl_scdirty, 0, 0);
+}
+
+
+/*
+ * sparse_cluster_push is called with the write behind lock held
+ */
+static void
+sparse_cluster_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_flag, int (*callback)(buf_t, void *), void *callback_arg)
+{
+        struct cl_extent cl;
+        off_t          offset;
+       u_int           length;
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_START, (int)vp, (int)wbp->cl_scmap, wbp->cl_scdirty, push_flag, 0);
+
+       if (push_flag & PUSH_ALL)
+               vfs_drt_control(&(wbp->cl_scmap), 1);
+
+       for (;;) {
+               if (vfs_drt_get_cluster(&(wbp->cl_scmap), &offset, &length) != KERN_SUCCESS)
+                       break;
+
+               cl.b_addr = (daddr64_t)(offset / PAGE_SIZE_64);
+               cl.e_addr = (daddr64_t)((offset + length) / PAGE_SIZE_64);
+
+               wbp->cl_scdirty -= (int)(cl.e_addr - cl.b_addr);
+
+               /*
+                * drop the lock while we're firing off the I/Os...
+                * this is safe since I've already updated the state
+                * this lock is protecting and I'm going to re-evaluate
+                * the public state after I retake the lock
+                *
+                * we need to drop it to avoid a lock inversion when trying to
+                * grab pages into the UPL... another thread in 'write' may
+                * have these pages in its UPL and be blocked trying to
+                * gain the write-behind lock for this vnode
+                */
+               lck_mtx_unlock(&wbp->cl_lockw);
+
+               cluster_push_now(vp, &cl, EOF, push_flag & IO_PASSIVE, callback, callback_arg);
+
+               lck_mtx_lock(&wbp->cl_lockw);
+
+               if ( !(push_flag & PUSH_ALL) )
+                       break;
+       }
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_END, (int)vp, (int)wbp->cl_scmap, wbp->cl_scdirty, 0, 0);
+}
+
+
+/*
+ * sparse_cluster_add is called with the write behind lock held
+ */
+static void
+sparse_cluster_add(struct cl_writebehind *wbp, vnode_t vp, struct cl_extent *cl, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg)
+{
+        u_int  new_dirty;
+       u_int   length;
+       off_t   offset;
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_START, (int)wbp->cl_scmap, wbp->cl_scdirty, (int)cl->b_addr, (int)cl->e_addr, 0);
+
+       offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
+       length = ((u_int)(cl->e_addr - cl->b_addr)) * PAGE_SIZE;
+
+       while (vfs_drt_mark_pages(&(wbp->cl_scmap), offset, length, &new_dirty) != KERN_SUCCESS) {
+               /*
+                * no room left in the map
+                * only a partial update was done
+                * push out some pages and try again
+                */
+               wbp->cl_scdirty += new_dirty;
+
+               sparse_cluster_push(wbp, vp, EOF, 0, callback, callback_arg);
+
+               offset += (new_dirty * PAGE_SIZE_64);
+               length -= (new_dirty * PAGE_SIZE);
+       }
+       wbp->cl_scdirty += new_dirty;
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_END, (int)vp, (int)wbp->cl_scmap, wbp->cl_scdirty, 0, 0);
+}
+
+
+static int
+cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
+{
+        upl_page_info_t  *pl;
+        upl_t            upl;
+        addr64_t        ubc_paddr;
+        kern_return_t    kret;
+        int              error = 0;
+       int              did_read = 0;
+       int              abort_flags;
+       int              upl_flags;
+       int              bflag;
+
+       if (flags & IO_PASSIVE)
+           bflag = CL_PASSIVE;
+       else
+           bflag = 0;
+
+       upl_flags = UPL_SET_LITE;
+
+       if ( !(flags & CL_READ) ) {
+               /*
+                * "write" operation:  let the UPL subsystem know
+                * that we intend to modify the buffer cache pages
+                * we're gathering.
+                */
+               upl_flags |= UPL_WILL_MODIFY;
+       } else {
+               /*
+                * indicate that there is no need to pull the
+                * mapping for this page... we're only going
+                * to read from it, not modify it.
+                */
+               upl_flags |= UPL_FILE_IO;
+       }
+        kret = ubc_create_upl(vp,
+                              uio->uio_offset & ~PAGE_MASK_64,
+                              PAGE_SIZE,
+                              &upl,
+                              &pl,
+                              upl_flags);
+
+        if (kret != KERN_SUCCESS)
+                return(EINVAL);
+
+        if (!upl_valid_page(pl, 0)) {
+                /*
+                 * issue a synchronous read to cluster_io
+                 */
+                error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
+                                  CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
+                if (error) {
+                          ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
+
+                          return(error);
+                }
+               did_read = 1;
+        }
+        ubc_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)(uio->uio_offset & PAGE_MASK_64);
+
+/*
+ *     NOTE:  There is no prototype for the following in BSD. It, and the definitions
+ *     of cppvPsrc, cppvPsnk, cppvFsnk, and cppvFsrc, will be found in
+ *     osfmk/ppc/mappings.h.  They are not included here because there appears to be no
+ *     way to do so without exporting them to kexts as well.
+ */
+       if (flags & CL_READ)
+//             copypv(ubc_paddr, usr_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsnk);    /* Copy physical to physical and flush the destination */
+               copypv(ubc_paddr, usr_paddr, xsize,        2 |        1 |        4);    /* Copy physical to physical and flush the destination */
+       else
+//             copypv(usr_paddr, ubc_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsrc);    /* Copy physical to physical and flush the source */
+               copypv(usr_paddr, ubc_paddr, xsize,        2 |        1 |        8);    /* Copy physical to physical and flush the source */
+       
+       if ( !(flags & CL_READ) || (upl_valid_page(pl, 0) && upl_dirty_page(pl, 0))) {
+               /*
+                * issue a synchronous write to cluster_io
+                */
+               error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
+                                  bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
+       }
+       if (error == 0) 
+               uio_update(uio, (user_size_t)xsize);
+
+       if (did_read)
+               abort_flags = UPL_ABORT_FREE_ON_EMPTY;
+       else
+               abort_flags = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
+
+       ubc_upl_abort_range(upl, 0, PAGE_SIZE, abort_flags);
+       
+       return (error);
+}
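The hard-coded flag words passed to copypv() above can be decoded from the commented-out calls they replace. The reconstruction below is an inference from those comments only (hypothetical names, not the authoritative osfmk/ppc/mappings.h definitions):

	/*
	 * Hypothetical reconstruction of the cppv* bits, inferred solely from
	 * the commented-out copypv() calls above:
	 *
	 *   2 | 1 | 4  ==  cppvPsrc | cppvPsnk | cppvFsnk   (read path)
	 *   2 | 1 | 8  ==  cppvPsrc | cppvPsnk | cppvFsrc   (write path)
	 */
	enum {
		sketch_cppvPsnk = 1,	/* sink (destination) address is physical */
		sketch_cppvPsrc = 2,	/* source address is physical */
		sketch_cppvFsnk = 4,	/* flush the sink after the copy */
		sketch_cppvFsrc = 8	/* flush the source after the copy */
	};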
+
+
+
+int
+cluster_copy_upl_data(struct uio *uio, upl_t upl, int upl_offset, int *io_resid)
+{
+        int       pg_offset;
+       int       pg_index;
+        int      csize;
+       int       segflg;
+       int       retval = 0;
+       int       xsize;
+       upl_page_info_t *pl;
+
+       xsize = *io_resid;
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
+                    (int)uio->uio_offset, upl_offset, xsize, 0, 0);
+
+       segflg = uio->uio_segflg;
+
+       switch(segflg) {
+
+         case UIO_USERSPACE32:
+         case UIO_USERISPACE32:
+               uio->uio_segflg = UIO_PHYS_USERSPACE32;
+               break;
+
+         case UIO_USERSPACE:
+         case UIO_USERISPACE:
+               uio->uio_segflg = UIO_PHYS_USERSPACE;
+               break;
+
+         case UIO_USERSPACE64:
+         case UIO_USERISPACE64:
+               uio->uio_segflg = UIO_PHYS_USERSPACE64;
+               break;
+
+         case UIO_SYSSPACE32:
+               uio->uio_segflg = UIO_PHYS_SYSSPACE32;
+               break;
+
+         case UIO_SYSSPACE:
+               uio->uio_segflg = UIO_PHYS_SYSSPACE;
+               break;
+
+         case UIO_SYSSPACE64:
+               uio->uio_segflg = UIO_PHYS_SYSSPACE64;
+               break;
+       }
+       pl = ubc_upl_pageinfo(upl);
+
+       pg_index  = upl_offset / PAGE_SIZE;
+       pg_offset = upl_offset & PAGE_MASK;
+       csize     = min(PAGE_SIZE - pg_offset, xsize);
+
+       while (xsize && retval == 0) {
+               addr64_t  paddr;
+
+               paddr = ((addr64_t)upl_phys_page(pl, pg_index) << 12) + pg_offset;
+
+               retval = uiomove64(paddr, csize, uio);
+
+               pg_index += 1;
+               pg_offset = 0;
+               xsize    -= csize;
+               csize     = min(PAGE_SIZE, xsize);
+       }
+       *io_resid = xsize;
+
+       uio->uio_segflg = segflg;
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
+                    (int)uio->uio_offset, xsize, retval, segflg, 0);
+
+       return (retval);
+}
+
+
+int
+cluster_copy_ubc_data(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty)
+{
+
+       return (cluster_copy_ubc_data_internal(vp, uio, io_resid, mark_dirty, 1));
+}
+
+
+static int
+cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference)
+{
+       int       segflg;
+       int       io_size;
+       int       xsize;
+       int       start_offset;
+       int       retval = 0;
+       memory_object_control_t  control;
+
+       io_size = *io_resid;
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
+                    (int)uio->uio_offset, 0, io_size, 0, 0);
+
+       control = ubc_getobject(vp, UBC_FLAGS_NONE);
+
+       if (control == MEMORY_OBJECT_CONTROL_NULL) {
+               KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
+                            (int)uio->uio_offset, io_size, retval, 3, 0);
+
+               return(0);
+       }
+       segflg = uio->uio_segflg;
+
+       switch(segflg) {
+
+         case UIO_USERSPACE32:
+         case UIO_USERISPACE32:
+               uio->uio_segflg = UIO_PHYS_USERSPACE32;
+               break;
+
+         case UIO_USERSPACE64:
+         case UIO_USERISPACE64:
+               uio->uio_segflg = UIO_PHYS_USERSPACE64;
+               break;
+
+         case UIO_SYSSPACE32:
+               uio->uio_segflg = UIO_PHYS_SYSSPACE32;
+               break;
+
+         case UIO_SYSSPACE64:
+               uio->uio_segflg = UIO_PHYS_SYSSPACE64;
+               break;
+
+         case UIO_USERSPACE:
+         case UIO_USERISPACE:
+               uio->uio_segflg = UIO_PHYS_USERSPACE;
+               break;
+
+         case UIO_SYSSPACE:
+               uio->uio_segflg = UIO_PHYS_SYSSPACE;
+               break;
+       }
+
+       if ( (io_size = *io_resid) ) {
+               start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
+               xsize = uio_resid(uio);
+
+               retval = memory_object_control_uiomove(control, uio->uio_offset - start_offset, uio,
+                                                      start_offset, io_size, mark_dirty, take_reference);
+               xsize -= uio_resid(uio);
+               io_size -= xsize;
+       }
+       uio->uio_segflg = segflg;
+       *io_resid       = io_size;
+
+       KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
+                    (int)uio->uio_offset, io_size, retval, 0x80000000 | segflg, 0);
+
+       return(retval);
+}
+
+
+int
+is_file_clean(vnode_t vp, off_t filesize)
+{
+        off_t f_offset;
+       int   flags;
+       int   total_dirty = 0;
+
+       for (f_offset = 0; f_offset < filesize; f_offset += PAGE_SIZE_64) {
+               if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS) {
+                       if (flags & UPL_POP_DIRTY) {
+                               total_dirty++;
+                       }
+               }
+       }
+       if (total_dirty)
+               return(EINVAL);
+
+       return (0);
+}
+
+
+
+/*
+ * Dirty region tracking/clustering mechanism.
+ *
+ * This code (vfs_drt_*) provides a mechanism for tracking and clustering
+ * dirty regions within a larger space (file).  It is primarily intended to
+ * support clustering in large files with many dirty areas.
+ *
+ * The implementation assumes that the dirty regions are pages.
+ *
+ * To represent dirty pages within the file, we store bit vectors in a
+ * variable-size circular hash.
+ */
+
+/*
+ * Bitvector size.  This determines the number of pages we group in a
+ * single hashtable entry.  Each hashtable entry is aligned to this
+ * size within the file.
+ */
+#define DRT_BITVECTOR_PAGES            256
+
+/*
+ * File offset handling.
+ *
+ * DRT_ADDRESS_MASK is dependent on DRT_BITVECTOR_PAGES;
+ * the correct formula is  (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1))
+ */
+#define DRT_ADDRESS_MASK               (~((1 << 20) - 1))
+#define DRT_ALIGN_ADDRESS(addr)                ((addr) & DRT_ADDRESS_MASK)
+
+/*
+ * Hashtable address field handling.
+ *
+ * The low-order bits of the hashtable address field are reused to
+ * store the bucket's page count, conserving space.
+ *
+ * DRT_HASH_COUNT_MASK must be large enough to store the range
+ * 0-DRT_BITVECTOR_PAGES inclusive, as well as have one value
+ * to indicate that the bucket is actually unoccupied.
+ */
+#define DRT_HASH_GET_ADDRESS(scm, i)   ((scm)->scm_hashtable[(i)].dhe_control & DRT_ADDRESS_MASK)
+#define DRT_HASH_SET_ADDRESS(scm, i, a)                                                                        \
+       do {                                                                                            \
+               (scm)->scm_hashtable[(i)].dhe_control =                                                 \
+                   ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_ADDRESS_MASK) | DRT_ALIGN_ADDRESS(a); \
+       } while (0)
+#define DRT_HASH_COUNT_MASK            0x1ff
+#define DRT_HASH_GET_COUNT(scm, i)     ((scm)->scm_hashtable[(i)].dhe_control & DRT_HASH_COUNT_MASK)
+#define DRT_HASH_SET_COUNT(scm, i, c)                                                                                  \
+       do {                                                                                                            \
+               (scm)->scm_hashtable[(i)].dhe_control =                                                                 \
+                   ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_HASH_COUNT_MASK) | ((c) & DRT_HASH_COUNT_MASK);       \
+       } while (0)
+#define DRT_HASH_CLEAR(scm, i)                                                                                          \
+       do {                                                                                                            \
+               (scm)->scm_hashtable[(i)].dhe_control = 0;                                                              \
+       } while (0)
+#define DRT_HASH_VACATE(scm, i)                DRT_HASH_SET_COUNT((scm), (i), DRT_HASH_COUNT_MASK)
+#define DRT_HASH_VACANT(scm, i)                (DRT_HASH_GET_COUNT((scm), (i)) == DRT_HASH_COUNT_MASK)
+#define DRT_HASH_COPY(oscm, oi, scm, i)                                                                        \
+       do {                                                                                            \
+               (scm)->scm_hashtable[(i)].dhe_control = (oscm)->scm_hashtable[(oi)].dhe_control;        \
+               DRT_BITVECTOR_COPY(oscm, oi, scm, i);                                                   \
+       } while (0)
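To make the packing concrete, here is a minimal standalone sketch, assuming PAGE_SIZE = 4096 and the DRT_BITVECTOR_PAGES value defined above, of how a file offset splits into the aligned bucket address kept in dhe_control, the count kept in its low-order bits, and the bit index into the bucket's bitvector:

	/* Standalone sketch of the dhe_control layout described above; assumes
	 * PAGE_SIZE 4096 and DRT_BITVECTOR_PAGES 256, so each bucket covers a
	 * 1 MB aligned window of the file.  Illustrative only, not kernel code. */
	#include <stdint.h>
	#include <stdio.h>

	#define SK_PAGE_SIZE	4096ULL
	#define SK_WINDOW	(256ULL * SK_PAGE_SIZE)		/* bytes covered per bucket */
	#define SK_ADDR_MASK	(~(SK_WINDOW - 1))		/* DRT_ADDRESS_MASK */
	#define SK_COUNT_MASK	0x1ffULL			/* DRT_HASH_COUNT_MASK */

	int
	main(void)
	{
		uint64_t offset = 0x12345000ULL;		/* a page-aligned file offset */
		uint64_t bucket = offset & SK_ADDR_MASK;	/* DRT_ALIGN_ADDRESS(offset) */
		unsigned bit = (unsigned)((offset - bucket) / SK_PAGE_SIZE);
		uint64_t control = bucket | 1;			/* address plus a count of 1 dirty page */

		/* prints: bucket 0x12300000  bit 69  count 1  vacant 0 */
		printf("bucket 0x%llx  bit %u  count %llu  vacant %d\n",
		    (unsigned long long)(control & SK_ADDR_MASK),
		    bit,
		    (unsigned long long)(control & SK_COUNT_MASK),
		    (control & SK_COUNT_MASK) == SK_COUNT_MASK);
		return (0);
	}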
+
+
+/*
+ * Hash table moduli.
+ *
+ * Since the hashtable entry's size is dependent on the size of
+ * the bitvector, and since the hashtable size is constrained to
+ * both being prime and fitting within the desired allocation
+ * size, these values need to be manually determined.
+ *
+ * For DRT_BITVECTOR_PAGES = 256, the entry size is 40 bytes.
+ *
+ * The small hashtable allocation is 1024 bytes, so the modulus is 23.
+ * The large hashtable allocation is 16384 bytes, so the modulus is 401.
+ */
+#define DRT_HASH_SMALL_MODULUS 23
+#define DRT_HASH_LARGE_MODULUS 401
+
+#define DRT_SMALL_ALLOCATION   1024    /* 104 bytes spare */
+#define DRT_LARGE_ALLOCATION   16384   /* 344 bytes spare */
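The moduli and spare-byte figures above follow from simple arithmetic; a quick standalone check (a 40-byte entry is the 8-byte dhe_control plus eight 4-byte bitvector words; like the comments above, the spare figures ignore the small clustermap header that shares the allocation):

	/* Standalone check of the hashtable sizing arithmetic quoted above. */
	#include <stdio.h>

	int
	main(void)
	{
		int entry = 8 + (256 / 32) * 4;	/* 40-byte hashtable entry */

		/* prints: small: 23 * 40 = 920, spare 104 of 1024 */
		printf("small: 23 * %d = %d, spare %d of 1024\n", entry, 23 * entry, 1024 - 23 * entry);
		/* prints: large: 401 * 40 = 16040, spare 344 of 16384 */
		printf("large: 401 * %d = %d, spare %d of 16384\n", entry, 401 * entry, 16384 - 401 * entry);
		return (0);
	}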
+
+/* *** nothing below here has secret dependencies on DRT_BITVECTOR_PAGES *** */
+
+/*
+ * Hashtable bitvector handling.
+ *
+ * Bitvector fields are 32 bits long.
+ */
+
+#define DRT_HASH_SET_BIT(scm, i, bit)                          \
+       (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] |= (1 << ((bit) % 32))
+
+#define DRT_HASH_CLEAR_BIT(scm, i, bit)                                \
+       (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] &= ~(1 << ((bit) % 32))
+    
+#define DRT_HASH_TEST_BIT(scm, i, bit)                                 \
+       ((scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] & (1 << ((bit) % 32)))
+    
+#define DRT_BITVECTOR_CLEAR(scm, i)                            \
+       bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
+
+#define DRT_BITVECTOR_COPY(oscm, oi, scm, i)                   \
+       bcopy(&(oscm)->scm_hashtable[(oi)].dhe_bitvector[0],    \
+           &(scm)->scm_hashtable[(i)].dhe_bitvector[0],        \
+           (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
+
+
+/*
+ * Hashtable entry.
+ */
+struct vfs_drt_hashentry {
+       u_int64_t       dhe_control;
+       u_int32_t       dhe_bitvector[DRT_BITVECTOR_PAGES / 32];
+};
+
+/*
+ * Dirty Region Tracking structure.
+ *
+ * The hashtable is allocated entirely inside the DRT structure.
+ *
+ * The hash is a simple circular prime modulus arrangement; the structure
+ * is resized from small to large if it overflows.
+ */
+
+struct vfs_drt_clustermap {
+       u_int32_t               scm_magic;      /* sanity/detection */
+#define DRT_SCM_MAGIC          0x12020003
+       u_int32_t               scm_modulus;    /* current ring size */
+       u_int32_t               scm_buckets;    /* number of occupied buckets */
+       u_int32_t               scm_lastclean;  /* last entry we cleaned */
+       u_int32_t               scm_iskips;     /* number of slot skips */
+
+       struct vfs_drt_hashentry scm_hashtable[0];
+};
+
+
+#define DRT_HASH(scm, addr)            ((addr) % (scm)->scm_modulus)
+#define DRT_HASH_NEXT(scm, addr)       (((addr) + 1) % (scm)->scm_modulus)
+
+/*
+ * Debugging codes and arguments.
+ */
+#define DRT_DEBUG_EMPTYFREE    (FSDBG_CODE(DBG_FSRW, 82)) /* nil */
+#define DRT_DEBUG_RETCLUSTER   (FSDBG_CODE(DBG_FSRW, 83)) /* offset, length */
+#define DRT_DEBUG_ALLOC                (FSDBG_CODE(DBG_FSRW, 84)) /* copycount */
+#define DRT_DEBUG_INSERT       (FSDBG_CODE(DBG_FSRW, 85)) /* offset, iskip */
+#define DRT_DEBUG_MARK         (FSDBG_CODE(DBG_FSRW, 86)) /* offset, length,
+                                                           * dirty */
+                                                          /* 0, setcount */
+                                                          /* 1 (clean, no map) */
+                                                          /* 2 (map alloc fail) */
+                                                          /* 3, resid (partial) */
+#define DRT_DEBUG_6            (FSDBG_CODE(DBG_FSRW, 87))
+#define DRT_DEBUG_SCMDATA      (FSDBG_CODE(DBG_FSRW, 88)) /* modulus, buckets,
+                                                           * lastclean, iskips */
+
+
+static kern_return_t   vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp);
+static kern_return_t   vfs_drt_free_map(struct vfs_drt_clustermap *cmap);
+static kern_return_t   vfs_drt_search_index(struct vfs_drt_clustermap *cmap,
+       u_int64_t offset, int *indexp);
+static kern_return_t   vfs_drt_get_index(struct vfs_drt_clustermap **cmapp,
+       u_int64_t offset,
+       int *indexp,
+       int recursed);
+static kern_return_t   vfs_drt_do_mark_pages(
+       void            **cmapp,
+       u_int64_t       offset,
+       u_int           length,
+       u_int           *setcountp,
+       int             dirty);
+static void            vfs_drt_trace(
+       struct vfs_drt_clustermap *cmap,
+       int code,
+       int arg1,
+       int arg2,
+       int arg3,
+       int arg4);
+
+
+/*
+ * Allocate and initialise a sparse cluster map.
+ *
+ * Will allocate a new map, or resize or compact an existing map.
+ *
+ * XXX we should probably have at least one intermediate map size,
+ * as the 1:16 ratio seems a bit drastic.
+ */
+static kern_return_t
+vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp)
+{
+       struct vfs_drt_clustermap *cmap, *ocmap;
+       kern_return_t   kret;
+       u_int64_t       offset;
+       u_int32_t       i;
+       int             nsize, active_buckets, index, copycount;
+
+       ocmap = NULL;
+       if (cmapp != NULL)
+               ocmap = *cmapp;
+       
+       /*
+        * Decide on the size of the new map.
+        */
+       if (ocmap == NULL) {
+               nsize = DRT_HASH_SMALL_MODULUS;
+       } else {
+               /* count the number of active buckets in the old map */
+               active_buckets = 0;
+               for (i = 0; i < ocmap->scm_modulus; i++) {
+                       if (!DRT_HASH_VACANT(ocmap, i) &&
+                           (DRT_HASH_GET_COUNT(ocmap, i) != 0))
+                               active_buckets++;
+               }
+               /*
+                * If we're currently using the small allocation, check to
+                * see whether we should grow to the large one.
+                */
+               if (ocmap->scm_modulus == DRT_HASH_SMALL_MODULUS) {
+                       /* if the ring is nearly full */
+                       if (active_buckets > (DRT_HASH_SMALL_MODULUS - 5)) {
+                               nsize = DRT_HASH_LARGE_MODULUS;
+                       } else {
+                               nsize = DRT_HASH_SMALL_MODULUS;
+                       }
+               } else {
+                       /* already using the large modulus */
+                       nsize = DRT_HASH_LARGE_MODULUS;
+                       /*
+                        * If the ring is completely full, there's
+                        * nothing useful for us to do.  Behave as
+                        * though we had compacted into the new
+                        * array and return.
+                        */
+                       if (active_buckets >= DRT_HASH_LARGE_MODULUS)
+                               return(KERN_SUCCESS);
+               }
+       }
+
+       /*
+        * Allocate and initialise the new map.
+        */
+
+       kret = kmem_alloc(kernel_map, (vm_offset_t *)&cmap,
+           (nsize == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION);
+       if (kret != KERN_SUCCESS)
+               return(kret);
+       cmap->scm_magic = DRT_SCM_MAGIC;
+       cmap->scm_modulus = nsize;
+       cmap->scm_buckets = 0;
+       cmap->scm_lastclean = 0;
+       cmap->scm_iskips = 0;
+       for (i = 0; i < cmap->scm_modulus; i++) {
+               DRT_HASH_CLEAR(cmap, i);
+               DRT_HASH_VACATE(cmap, i);
+               DRT_BITVECTOR_CLEAR(cmap, i);
+       }
+
+       /*
+        * If there's an old map, re-hash entries from it into the new map.
+        */
+       copycount = 0;
+       if (ocmap != NULL) {
+               for (i = 0; i < ocmap->scm_modulus; i++) {
+                       /* skip empty buckets */
+                       if (DRT_HASH_VACANT(ocmap, i) ||
+                           (DRT_HASH_GET_COUNT(ocmap, i) == 0))
+                               continue;
+                       /* get new index */
+                       offset = DRT_HASH_GET_ADDRESS(ocmap, i);
+                       kret = vfs_drt_get_index(&cmap, offset, &index, 1);
+                       if (kret != KERN_SUCCESS) {
+                               /* XXX need to bail out gracefully here */
+                               panic("vfs_drt: new cluster map mysteriously too small");
+                               index = 0;
+                       }
+                       /* copy */
+                       DRT_HASH_COPY(ocmap, i, cmap, index);
+                       copycount++;
+               }
+       }
+
+       /* log what we've done */
+       vfs_drt_trace(cmap, DRT_DEBUG_ALLOC, copycount, 0, 0, 0);
+       
+       /*
+        * It's important to ensure that *cmapp always points to 
+        * a valid map, so we must overwrite it before freeing
+        * the old map.
+        */
+       *cmapp = cmap;
+       if (ocmap != NULL) {
+               /* emit stats into trace buffer */
+               vfs_drt_trace(ocmap, DRT_DEBUG_SCMDATA,
+                             ocmap->scm_modulus,
+                             ocmap->scm_buckets,
+                             ocmap->scm_lastclean,
+                             ocmap->scm_iskips);
+
+               vfs_drt_free_map(ocmap);
+       }
+       return(KERN_SUCCESS);
+}
+
+
+/*
+ * Free a sparse cluster map.
+ */
+static kern_return_t
+vfs_drt_free_map(struct vfs_drt_clustermap *cmap)
+{
+       kmem_free(kernel_map, (vm_offset_t)cmap, 
+                 (cmap->scm_modulus == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION);
+       return(KERN_SUCCESS);
+}
+
+
+/*
+ * Find the hashtable slot currently occupied by an entry for the supplied offset.
+ */
+static kern_return_t
+vfs_drt_search_index(struct vfs_drt_clustermap *cmap, u_int64_t offset, int *indexp)
+{
+       int             index;
+       u_int32_t       i;
+
+       offset = DRT_ALIGN_ADDRESS(offset);
+       index = DRT_HASH(cmap, offset);
+
+       /* traverse the hashtable */
+       for (i = 0; i < cmap->scm_modulus; i++) {
+
+               /*
+                * If the slot is vacant, we can stop.
+                */
+               if (DRT_HASH_VACANT(cmap, index))
+                       break;
+
+               /*
+                * If the address matches our offset, we have success.
+                */
+               if (DRT_HASH_GET_ADDRESS(cmap, index) == offset) {
+                       *indexp = index;
+                       return(KERN_SUCCESS);
+               }
+
+               /*
+                * Move to the next slot, try again.
+                */
+               index = DRT_HASH_NEXT(cmap, index);
+       }
+       /*
+        * It's not there.
+        */
+       return(KERN_FAILURE);
+}
+
+/*
+ * Find the hashtable slot for the supplied offset.  If we haven't allocated
+ * one yet, allocate one and populate the address field.  Note that it will
+ * have a zero page count and thus will still technically be free, so
+ * in the case where we are called to clean pages, the slot will remain free.
+ */
+static kern_return_t
+vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *indexp, int recursed)
+{
+       struct vfs_drt_clustermap *cmap;
+       kern_return_t   kret;
+       u_int32_t       index;
+       u_int32_t       i;
+
+       cmap = *cmapp;
+
+       /* look for an existing entry */
+       kret = vfs_drt_search_index(cmap, offset, indexp);
+       if (kret == KERN_SUCCESS)
+               return(kret);
+
+       /* need to allocate an entry */
+       offset = DRT_ALIGN_ADDRESS(offset);
+       index = DRT_HASH(cmap, offset);
+
+       /* scan from the index forwards looking for a vacant slot */
+       for (i = 0; i < cmap->scm_modulus; i++) {
+               /* slot vacant? */
+               if (DRT_HASH_VACANT(cmap, index) || DRT_HASH_GET_COUNT(cmap,index) == 0) {
+                       cmap->scm_buckets++;
+                       if (index < cmap->scm_lastclean)
+                               cmap->scm_lastclean = index;
+                       DRT_HASH_SET_ADDRESS(cmap, index, offset);
+                       DRT_HASH_SET_COUNT(cmap, index, 0);
+                       DRT_BITVECTOR_CLEAR(cmap, index);
+                       *indexp = index;
+                       vfs_drt_trace(cmap, DRT_DEBUG_INSERT, (int)offset, i, 0, 0);
+                       return(KERN_SUCCESS);
+               }
+               cmap->scm_iskips += i;
+               index = DRT_HASH_NEXT(cmap, index);
+       }
+
+       /*
+        * We haven't found a vacant slot, so the map is full.  If we're not
+        * already recursed, try reallocating/compacting it.
+        */
+       if (recursed)
+               return(KERN_FAILURE);
+       kret = vfs_drt_alloc_map(cmapp);
+       if (kret == KERN_SUCCESS) {
+               /* now try to insert again */
+               kret = vfs_drt_get_index(cmapp, offset, indexp, 1);
+       }
+       return(kret);
+}
+
+/*
+ * Implementation of set dirty/clean.
+ *
+ * In the 'clean' case, not finding a map is OK.
+ */
+static kern_return_t
+vfs_drt_do_mark_pages(
+       void            **private,
+       u_int64_t       offset,
+       u_int           length,
+       u_int           *setcountp,
+       int             dirty)
+{
+       struct vfs_drt_clustermap *cmap, **cmapp;
+       kern_return_t   kret;
+       int             i, index, pgoff, pgcount, setcount, ecount;
+
+       cmapp = (struct vfs_drt_clustermap **)private;
+       cmap = *cmapp;
+
+       vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_START, (int)offset, (int)length, dirty, 0);
+
+       if (setcountp != NULL)
+               *setcountp = 0;
+       
+       /* allocate a cluster map if we don't already have one */
+       if (cmap == NULL) {
+               /* no cluster map, nothing to clean */
+               if (!dirty) {
+                       vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 1, 0, 0, 0);
+                       return(KERN_SUCCESS);
+               }
+               kret = vfs_drt_alloc_map(cmapp);
+               if (kret != KERN_SUCCESS) {
+                       vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 2, 0, 0, 0);
+                       return(kret);
+               }
+       }
+       setcount = 0;
+
+       /*
+        * Iterate over the length of the region.
+        */
+       while (length > 0) {
+               /*
+                * Get the hashtable index for this offset.
+                *
+                * XXX this will add blank entries if we are clearing a range
+                * that hasn't been dirtied.
+                */
+               kret = vfs_drt_get_index(cmapp, offset, &index, 0);
+               cmap = *cmapp;  /* may have changed! */
+               /* this may be a partial-success return */
+               if (kret != KERN_SUCCESS) {
+                       if (setcountp != NULL)
+                               *setcountp = setcount;
+                       vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 3, (int)length, 0, 0);
+
+                       return(kret);
+               }
+
+               /*
+                * Work out how many pages we're modifying in this
+                * hashtable entry.
+                */
+               pgoff = (offset - DRT_ALIGN_ADDRESS(offset)) / PAGE_SIZE;
+               pgcount = min((length / PAGE_SIZE), (DRT_BITVECTOR_PAGES - pgoff));
+
+               /*
+                * Iterate over pages, dirty/clearing as we go.
+                */
+               ecount = DRT_HASH_GET_COUNT(cmap, index);
+               for (i = 0; i < pgcount; i++) {
+                       if (dirty) {
+                               if (!DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
+                                       DRT_HASH_SET_BIT(cmap, index, pgoff + i);
+                                       ecount++;
+                                       setcount++;
+                               }
+                       } else {
+                               if (DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
+                                       DRT_HASH_CLEAR_BIT(cmap, index, pgoff + i);
+                                       ecount--;
+                                       setcount++;
+                               }
+                       }
+               }
+               DRT_HASH_SET_COUNT(cmap, index, ecount);
+
+               offset += pgcount * PAGE_SIZE;
+               length -= pgcount * PAGE_SIZE;
+       }
+       if (setcountp != NULL)
+               *setcountp = setcount;
+
+       vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 0, setcount, 0, 0);
+
+       return(KERN_SUCCESS);
+}
+
+/*
+ * Mark a set of pages as dirty/clean.
+ *
+ * This is a public interface.
+ *
+ * cmapp
+ *     Pointer to storage suitable for holding a pointer.  Note that
+ *     this must either be NULL or a value set by this function.
+ *
+ * offset
+ *     Offset of the first page to be marked as dirty, in bytes.  Must be
+ *     page-aligned.
+ *
+ * length
+ *     Length of dirty region, in bytes.  Must be a multiple of PAGE_SIZE.
+ *
+ * setcountp
+ *     Number of pages newly marked dirty by this call (optional).
+ *
+ * Returns KERN_SUCCESS if all the pages were successfully marked.
+ */
+static kern_return_t
+vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp)
+{
+       /* XXX size unused, drop from interface */
+       return(vfs_drt_do_mark_pages(cmapp, offset, length, setcountp, 1));
+}
+
+#if 0
+static kern_return_t
+vfs_drt_unmark_pages(void **cmapp, off_t offset, u_int length)
+{
+       return(vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0));
+}
+#endif
+
+/*
+ * Get a cluster of dirty pages.
+ *
+ * This is a public interface.
+ *
+ * cmapp
+ *     Pointer to storage managed by drt_mark_pages.  Note that this must
+ *     be NULL or a value set by drt_mark_pages.
+ *
+ * offsetp
+ *     Returns the byte offset into the file of the first page in the cluster.
+ *
+ * lengthp
+ *     Returns the length in bytes of the cluster of dirty pages.
+ *
+ * Returns success if a cluster was found.  If KERN_FAILURE is returned, there
+ * are no dirty pages meeting the minimum size criteria.  Private storage will
+ * be released if there are no more dirty pages left in the map.
+ *
+ */
+static kern_return_t
+vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp)
+{
+       struct vfs_drt_clustermap *cmap;
+       u_int64_t       offset;
+       u_int           length;
+       u_int32_t       j;
+       int             index, i, fs, ls;
+
+       /* sanity */
+       if ((cmapp == NULL) || (*cmapp == NULL))
+               return(KERN_FAILURE);
+       cmap = *cmapp;
+
+       /* walk the hashtable */
+       for (offset = 0, j = 0; j < cmap->scm_modulus; offset += (DRT_BITVECTOR_PAGES * PAGE_SIZE), j++) {
+               index = DRT_HASH(cmap, offset);
+
+               if (DRT_HASH_VACANT(cmap, index) || (DRT_HASH_GET_COUNT(cmap, index) == 0))
+                       continue;
+
+               /* scan the bitfield for a string of bits */
+               fs = -1;
+
+               for (i = 0; i < DRT_BITVECTOR_PAGES; i++) {
+                       if (DRT_HASH_TEST_BIT(cmap, index, i)) {
+                               fs = i;
+                               break;
+                       }
+               }
+               if (fs == -1) {
+                       /*  didn't find any bits set */
+                       panic("vfs_drt: entry summary count > 0 but no bits set in map");
+               }
+               for (ls = 0; i < DRT_BITVECTOR_PAGES; i++, ls++) {
+                       if (!DRT_HASH_TEST_BIT(cmap, index, i))
+                               break;
+               }
+               
+               /* compute offset and length, mark pages clean */
+               offset = DRT_HASH_GET_ADDRESS(cmap, index) + (PAGE_SIZE * fs);
+               length = ls * PAGE_SIZE;
+               vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0);
+               cmap->scm_lastclean = index;
+
+               /* return successful */
+               *offsetp = (off_t)offset;
+               *lengthp = length;
+
+               vfs_drt_trace(cmap, DRT_DEBUG_RETCLUSTER, (int)offset, (int)length, 0, 0);
+               return(KERN_SUCCESS);
+       }
+       /*
+        * We didn't find anything... hashtable is empty
+        * emit stats into trace buffer and
+        * then free it
+        */
+       vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
+                     cmap->scm_modulus,
+                     cmap->scm_buckets,
+                     cmap->scm_lastclean,
+                     cmap->scm_iskips);
+       
+       vfs_drt_free_map(cmap);
+       *cmapp = NULL;
+
+       return(KERN_FAILURE);
+}
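The heart of the routine above is the scan for the first run of set bits in a bucket's bitvector. A standalone sketch of that same scan, using a plain array in place of the hashtable entry (illustrative only):

	/* Standalone sketch of the first-run-of-set-bits scan performed above
	 * (illustrative only, not kernel code). */
	#include <stdint.h>
	#include <stdio.h>

	#define SK_PAGES	256			/* DRT_BITVECTOR_PAGES */

	static int
	sk_test_bit(const uint32_t *bv, int bit)
	{
		return ((bv[bit / 32] & (1u << (bit % 32))) != 0);
	}

	int
	main(void)
	{
		uint32_t bv[SK_PAGES / 32] = { 0 };
		int	 i, fs = -1, ls = 0;

		bv[2] = 0x00000f00;		/* pages 72..75 of this bucket are dirty */

		/* find the first set bit */
		for (i = 0; i < SK_PAGES; i++) {
			if (sk_test_bit(bv, i)) {
				fs = i;
				break;
			}
		}
		/* measure the run of set bits starting there */
		if (fs != -1) {
			for (ls = 0; i < SK_PAGES; i++, ls++) {
				if (!sk_test_bit(bv, i))
					break;
			}
		}
		printf("first dirty page %d, run length %d\n", fs, ls);	/* 72, 4 */
		return (0);
	}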
+
+
+static kern_return_t
+vfs_drt_control(void **cmapp, int op_type)
+{
+       struct vfs_drt_clustermap *cmap;
+
+       /* sanity */
+       if ((cmapp == NULL) || (*cmapp == NULL))
+               return(KERN_FAILURE);
+       cmap = *cmapp;
+
+       switch (op_type) {
+       case 0:
+               /* emit stats into trace buffer */
+               vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
+                             cmap->scm_modulus,
+                             cmap->scm_buckets,
+                             cmap->scm_lastclean,
+                             cmap->scm_iskips);
+
+               vfs_drt_free_map(cmap);
+               *cmapp = NULL;
+               break;
+
+       case 1:
+               cmap->scm_lastclean = 0;
+               break;
+       }
+       return(KERN_SUCCESS);
+}
+
+
+
+/*
+ * Emit a summary of the state of the clustermap into the trace buffer
+ * along with some caller-provided data.
+ */
+#if KDEBUG
+static void
+vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, int code, int arg1, int arg2, int arg3, int arg4)
+{
+       KERNEL_DEBUG(code, arg1, arg2, arg3, arg4, 0);
+}
+#else
+static void
+vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, __unused int code, 
+                         __unused int arg1, __unused int arg2, __unused int arg3, 
+                         __unused int arg4)
+{
+}
+#endif 
+
+#if 0
+/*
+ * Perform basic sanity check on the hash entry summary count
+ * vs. the actual bits set in the entry.
+ */
+static void
+vfs_drt_sanity(struct vfs_drt_clustermap *cmap)
+{
+        int index, i;
+       int bits_on;
+       
+       for (index = 0; index < cmap->scm_modulus; index++) {
+               if (DRT_HASH_VACANT(cmap, index))
+                       continue;
+
+               for (bits_on = 0, i = 0; i < DRT_BITVECTOR_PAGES; i++) {
+                       if (DRT_HASH_TEST_BIT(cmap, index, i))
+                               bits_on++;
+               }
+               if (bits_on != DRT_HASH_GET_COUNT(cmap, index))
+                       panic("bits_on = %d,  index = %d\n", bits_on, index);
+       }               
 }
+#endif