+ if (flags & CL_COMMIT) {
+ int upl_flags;
+
+ pg_offset = upl_offset & PAGE_MASK;
+ abort_size = (upl_end_offset - upl_offset + PAGE_MASK) & ~PAGE_MASK;
+
+ upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, abort_size, error, io_flags, vp);
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE,
+ upl, upl_offset - pg_offset, abort_size, (error << 24) | upl_flags, 0);
+ }
+ if (retval == 0)
+ retval = error;
+ } else if (cbp_head)
+ panic("%s(): cbp_head is not NULL.\n", __FUNCTION__);
+
+ if (real_bp) {
+ /*
+ * can get here if we either encountered an error
+ * or we completely zero-filled the request and
+ * no I/O was issued
+ */
+ if (error) {
+ real_bp->b_flags |= B_ERROR;
+ real_bp->b_error = error;
+ }
+ buf_biodone(real_bp);
+ }
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_END, (int)f_offset, size, upl_offset, retval, 0);
+
+ return (retval);
+}
+
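+ /*
+ * reset the local state used by the vectored direct I/O paths to
+ * accumulate sub-UPLs into a single vector UPL
+ */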
+#define reset_vector_run_state() \
+ issueVectorUPL = vector_upl_offset = vector_upl_index = vector_upl_iosize = vector_upl_size = 0;
+
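+ /*
+ * issue all of the I/O that has been accumulated in a vector UPL
+ * with a single call to cluster_io... for reads, only ask for
+ * zero-fill (CL_PRESERVE) when the vector doesn't both start and
+ * end on a page boundary
+ */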
+static int
+vector_cluster_io(vnode_t vp, upl_t vector_upl, vm_offset_t vector_upl_offset, off_t v_upl_uio_offset, int vector_upl_iosize,
+ int io_flag, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
+{
+ vector_upl_set_pagelist(vector_upl);
+
+ if(io_flag & CL_READ) {
+ if(vector_upl_offset == 0 && ((vector_upl_iosize & PAGE_MASK)==0))
+ io_flag &= ~CL_PRESERVE; /*don't zero fill*/
+ else
+ io_flag |= CL_PRESERVE; /*zero fill*/
+ }
+ return (cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, real_bp, iostate, callback, callback_arg));
+
+}
+
+static int
+cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
+{
+ int pages_in_prefetch;
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_START,
+ (int)f_offset, size, (int)filesize, 0, 0);
+
+ if (f_offset >= filesize) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
+ (int)f_offset, 0, 0, 0, 0);
+ return(0);
+ }
+ if ((off_t)size > (filesize - f_offset))
+ size = filesize - f_offset;
+ pages_in_prefetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
+
+ advisory_read_ext(vp, filesize, f_offset, size, callback, callback_arg, bflag);
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
+ (int)f_offset + size, pages_in_prefetch, 0, 1, 0);
+
+ return (pages_in_prefetch);
+}
+
+
+
+static void
+cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *rap, int (*callback)(buf_t, void *), void *callback_arg,
+ int bflag)
+{
+ daddr64_t r_addr;
+ off_t f_offset;
+ int size_of_prefetch;
+ u_int max_prefetch;
+
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START,
+ (int)extent->b_addr, (int)extent->e_addr, (int)rap->cl_lastr, 0, 0);
+
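+ /*
+ * this is a single-page request for the same page we last read...
+ * nothing new to learn, so leave the read-ahead state alone
+ */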
+ if (extent->b_addr == rap->cl_lastr && extent->b_addr == extent->e_addr) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
+ rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 0, 0);
+ return;
+ }
+ if (rap->cl_lastr == -1 || (extent->b_addr != rap->cl_lastr && extent->b_addr != (rap->cl_lastr + 1))) {
+ rap->cl_ralen = 0;
+ rap->cl_maxra = 0;
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
+ rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 1, 0);
+
+ return;
+ }
+ max_prefetch = MAX_PREFETCH(vp, cluster_max_io_size(vp->v_mount, CL_READ), (vp->v_mount->mnt_kern_flag & MNTK_SSD));
+
+ if (max_prefetch > speculative_prefetch_max)
+ max_prefetch = speculative_prefetch_max;
+
+ if (max_prefetch <= PAGE_SIZE) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
+ rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 6, 0);
+ return;
+ }
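+ /*
+ * if the previous read-ahead still extends well past the end of
+ * this request, there's no need to issue another prefetch yet
+ */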
+ if (extent->e_addr < rap->cl_maxra && rap->cl_ralen >= 4) {
+ if ((rap->cl_maxra - extent->e_addr) > (rap->cl_ralen / 4)) {
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
+ rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 2, 0);
+ return;
+ }
+ }
+ r_addr = max(extent->e_addr, rap->cl_maxra) + 1;
+ f_offset = (off_t)(r_addr * PAGE_SIZE_64);
+
+ size_of_prefetch = 0;
+
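+ /*
+ * if the first page we'd prefetch is already present in the cache,
+ * skip the read-ahead for this request
+ */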
+ ubc_range_op(vp, f_offset, f_offset + PAGE_SIZE_64, UPL_ROP_PRESENT, &size_of_prefetch);
+
+ if (size_of_prefetch) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
+ rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 3, 0);
+ return;
+ }
+ if (f_offset < filesize) {
+ daddr64_t read_size;
+
+ rap->cl_ralen = rap->cl_ralen ? min(max_prefetch / PAGE_SIZE, rap->cl_ralen << 1) : 1;
+
+ read_size = (extent->e_addr + 1) - extent->b_addr;
+
+ if (read_size > rap->cl_ralen) {
+ if (read_size > max_prefetch / PAGE_SIZE)
+ rap->cl_ralen = max_prefetch / PAGE_SIZE;
+ else
+ rap->cl_ralen = read_size;
+ }
+ size_of_prefetch = cluster_read_prefetch(vp, f_offset, rap->cl_ralen * PAGE_SIZE, filesize, callback, callback_arg, bflag);
+
+ if (size_of_prefetch)
+ rap->cl_maxra = (r_addr + size_of_prefetch) - 1;
+ }
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
+ rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 4, 0);
+}
+
+
+int
+cluster_pageout(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
+ int size, off_t filesize, int flags)
+{
+ return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
+
+}
+
+
+int
+cluster_pageout_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
+ int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
+{
+ int io_size;
+ int rounded_size;
+ off_t max_size;
+ int local_flags;
+
+ local_flags = CL_PAGEOUT | CL_THROTTLE;
+
+ if ((flags & UPL_IOSYNC) == 0)
+ local_flags |= CL_ASYNC;
+ if ((flags & UPL_NOCOMMIT) == 0)
+ local_flags |= CL_COMMIT;
+ if ((flags & UPL_KEEPCACHED))
+ local_flags |= CL_KEEPCACHED;
+ if (flags & UPL_PAGING_ENCRYPTED)
+ local_flags |= CL_ENCRYPTED;
+
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 52)) | DBG_FUNC_NONE,
+ (int)f_offset, size, (int)filesize, local_flags, 0);
+
+ /*
+ * If they didn't specify any I/O, then we are done...
+ * we can't issue an abort because we don't know how
+ * big the upl really is
+ */
+ if (size <= 0)
+ return (EINVAL);
+
+ if (vp->v_mount->mnt_flag & MNT_RDONLY) {
+ if (local_flags & CL_COMMIT)
+ ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
+ return (EROFS);
+ }
+ /*
+ * can't page-out to a negative offset
+ * or if we're starting beyond the EOF
+ * or if the file offset isn't page aligned
+ * or the size requested isn't a multiple of PAGE_SIZE
+ */
+ if (f_offset < 0 || f_offset >= filesize ||
+ (f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) {
+ if (local_flags & CL_COMMIT)
+ ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
+ return (EINVAL);
+ }
+ max_size = filesize - f_offset;
+
+ if (size < max_size)
+ io_size = size;
+ else
+ io_size = max_size;
+
+ rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
+
+ if (size > rounded_size) {
+ if (local_flags & CL_COMMIT)
+ ubc_upl_abort_range(upl, upl_offset + rounded_size, size - rounded_size,
+ UPL_ABORT_FREE_ON_EMPTY);
+ }
+ return (cluster_io(vp, upl, upl_offset, f_offset, io_size,
+ local_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg));
+}
+
+
+int
+cluster_pagein(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
+ int size, off_t filesize, int flags)
+{
+ return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
+}
+
+
+int
+cluster_pagein_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
+ int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
+{
+ u_int io_size;
+ int rounded_size;
+ off_t max_size;
+ int retval;
+ int local_flags = 0;
+
+ if (upl == NULL || size < 0)
+ panic("cluster_pagein: NULL upl passed in");
+
+ if ((flags & UPL_IOSYNC) == 0)
+ local_flags |= CL_ASYNC;
+ if ((flags & UPL_NOCOMMIT) == 0)
+ local_flags |= CL_COMMIT;
+ if (flags & UPL_IOSTREAMING)
+ local_flags |= CL_IOSTREAMING;
+ if (flags & UPL_PAGING_ENCRYPTED)
+ local_flags |= CL_ENCRYPTED;
+
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 56)) | DBG_FUNC_NONE,
+ (int)f_offset, size, (int)filesize, local_flags, 0);
+
+ /*
+ * can't page-in from a negative offset
+ * or if we're starting beyond the EOF
+ * or if the file offset isn't page aligned
+ * or the size requested isn't a multiple of PAGE_SIZE
+ */
+ if (f_offset < 0 || f_offset >= filesize ||
+ (f_offset & PAGE_MASK_64) || (size & PAGE_MASK) || (upl_offset & PAGE_MASK)) {
+ if (local_flags & CL_COMMIT)
+ ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
+ return (EINVAL);
+ }
+ max_size = filesize - f_offset;
+
+ if (size < max_size)
+ io_size = size;
+ else
+ io_size = max_size;
+
+ rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
+
+ if (size > rounded_size && (local_flags & CL_COMMIT))
+ ubc_upl_abort_range(upl, upl_offset + rounded_size,
+ size - rounded_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
+
+ retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
+ local_flags | CL_READ | CL_PAGEIN, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
+
+ return (retval);
+}
+
+
+int
+cluster_bp(buf_t bp)
+{
+ return cluster_bp_ext(bp, NULL, NULL);
+}
+
+
+int
+cluster_bp_ext(buf_t bp, int (*callback)(buf_t, void *), void *callback_arg)
+{
+ off_t f_offset;
+ int flags;
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 19)) | DBG_FUNC_START,
+ bp, (int)bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
+
+ if (bp->b_flags & B_READ)
+ flags = CL_ASYNC | CL_READ;
+ else
+ flags = CL_ASYNC;
+ if (bp->b_flags & B_PASSIVE)
+ flags |= CL_PASSIVE;
+
+ f_offset = ubc_blktooff(bp->b_vp, bp->b_lblkno);
+
+ return (cluster_io(bp->b_vp, bp->b_upl, 0, f_offset, bp->b_bcount, flags, bp, (struct clios *)NULL, callback, callback_arg));
+}
+
+
+
+int
+cluster_write(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff, int xflags)
+{
+ return cluster_write_ext(vp, uio, oldEOF, newEOF, headOff, tailOff, xflags, NULL, NULL);
+}
+
+
+int
+cluster_write_ext(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff,
+ int xflags, int (*callback)(buf_t, void *), void *callback_arg)
+{
+ user_ssize_t cur_resid;
+ int retval = 0;
+ int flags;
+ int zflags;
+ int bflag;
+ int write_type = IO_COPY;
+ u_int32_t write_length;
+
+ flags = xflags;
+
+ if (flags & IO_PASSIVE)
+ bflag = CL_PASSIVE;
+ else
+ bflag = 0;
+
+ if (vp->v_flag & VNOCACHE_DATA){
+ flags |= IO_NOCACHE;
+ bflag |= CL_NOCACHE;
+ }
+ if (uio == NULL) {
+ /*
+ * no user data...
+ * this call is being made to zero-fill some range in the file
+ */
+ retval = cluster_write_copy(vp, NULL, (u_int32_t)0, oldEOF, newEOF, headOff, tailOff, flags, callback, callback_arg);
+
+ return(retval);
+ }
+ /*
+ * do a write through the cache if one of the following is true....
+ * NOCACHE is not true or NODIRECT is true
+ * the uio request doesn't target USERSPACE
+ * otherwise, find out if we want the direct or contig variant for
+ * the first vector in the uio request
+ */
+ if ( ((flags & (IO_NOCACHE | IO_NODIRECT)) == IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg) )
+ retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE);
+
+ if ( (flags & (IO_TAILZEROFILL | IO_HEADZEROFILL)) && write_type == IO_DIRECT)
+ /*
+ * must go through the cached variant in this case
+ */
+ write_type = IO_COPY;
+
+ while ((cur_resid = uio_resid(uio)) && uio->uio_offset < newEOF && retval == 0) {
+
+ switch (write_type) {
+
+ case IO_COPY:
+ /*
+ * make sure the uio_resid isn't too big...
+ * internally, we want to handle all of the I/O in
+ * chunk sizes that fit in a 32 bit int
+ */
+ if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) {
+ /*
+ * we're going to have to call cluster_write_copy
+ * more than once...
+ *
+ * only want the last call to cluster_write_copy to
+ * have the IO_TAILZEROFILL flag set and only the
+ * first call should have IO_HEADZEROFILL
+ */
+ zflags = flags & ~IO_TAILZEROFILL;
+ flags &= ~IO_HEADZEROFILL;
+
+ write_length = MAX_IO_REQUEST_SIZE;
+ } else {
+ /*
+ * last call to cluster_write_copy
+ */
+ zflags = flags;
+
+ write_length = (u_int32_t)cur_resid;
+ }
+ retval = cluster_write_copy(vp, uio, write_length, oldEOF, newEOF, headOff, tailOff, zflags, callback, callback_arg);
+ break;
+
+ case IO_CONTIG:
+ zflags = flags & ~(IO_TAILZEROFILL | IO_HEADZEROFILL);
+
+ if (flags & IO_HEADZEROFILL) {
+ /*
+ * only do this once per request
+ */
+ flags &= ~IO_HEADZEROFILL;
+
+ retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, uio->uio_offset,
+ headOff, (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
+ if (retval)
+ break;
+ }
+ retval = cluster_write_contig(vp, uio, newEOF, &write_type, &write_length, callback, callback_arg, bflag);
+
+ if (retval == 0 && (flags & IO_TAILZEROFILL) && uio_resid(uio) == 0) {
+ /*
+ * we're done with the data from the user specified buffer(s)
+ * and we've been requested to zero fill at the tail
+ * treat this as an IO_HEADZEROFILL which doesn't require a uio
+ * by rearranging the args and passing in IO_HEADZEROFILL
+ */
+ retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, tailOff, uio->uio_offset,
+ (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
+ }
+ break;
+
+ case IO_DIRECT:
+ /*
+ * cluster_write_direct is never called with IO_TAILZEROFILL || IO_HEADZEROFILL
+ */
+ retval = cluster_write_direct(vp, uio, oldEOF, newEOF, &write_type, &write_length, flags, callback, callback_arg);
+ break;
+
+ case IO_UNKNOWN:
+ retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE);
+ break;
+ }
+ /*
+ * in case we end up calling cluster_write_copy (from cluster_write_direct)
+ * multiple times to service a multi-vector request that is not aligned properly
+ * we need to update the oldEOF so that we
+ * don't zero-fill the head of a page if we've successfully written
+ * data to that area... 'cluster_write_copy' will zero-fill the head of a
+ * page that is beyond the oldEOF if the write is unaligned... we only
+ * want that to happen for the very first page of the cluster_write,
+ * NOT the first page of each vector making up a multi-vector write.
+ */
+ if (uio->uio_offset > oldEOF)
+ oldEOF = uio->uio_offset;
+ }
+ return (retval);
+}
+
+
+static int
+cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
+ int flags, int (*callback)(buf_t, void *), void *callback_arg)
+{
+ upl_t upl;
+ upl_page_info_t *pl;
+ vm_offset_t upl_offset;
+ vm_offset_t vector_upl_offset = 0;
+ u_int32_t io_req_size;
+ u_int32_t offset_in_file;
+ u_int32_t offset_in_iovbase;
+ u_int32_t io_size;
+ int io_flag = 0;
+ upl_size_t upl_size, vector_upl_size = 0;
+ vm_size_t upl_needed_size;
+ mach_msg_type_number_t pages_in_pl;
+ upl_control_flags_t upl_flags;
+ kern_return_t kret;
+ mach_msg_type_number_t i;
+ int force_data_sync;
+ int retval = 0;
+ int first_IO = 1;
+ struct clios iostate;
+ user_addr_t iov_base;
+ u_int32_t mem_alignment_mask;
+ u_int32_t devblocksize;
+ u_int32_t max_io_size;
+ u_int32_t max_upl_size;
+ u_int32_t max_vector_size;
+ u_int32_t bytes_outstanding_limit;
+ boolean_t io_throttled = FALSE;
+
+ u_int32_t vector_upl_iosize = 0;
+ int issueVectorUPL = 0,useVectorUPL = (uio->uio_iovcnt > 1);
+ off_t v_upl_uio_offset = 0;
+ int vector_upl_index=0;
+ upl_t vector_upl = NULL;
+
+
+ /*
+ * When we enter this routine, we know
+ * -- the resid will not exceed iov_len
+ */
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
+ (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
+
+ max_upl_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
+
+ io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT | CL_THROTTLE | CL_DIRECT_IO;
+
+ if (flags & IO_PASSIVE)
+ io_flag |= CL_PASSIVE;
+
+ if (flags & IO_NOCACHE)
+ io_flag |= CL_NOCACHE;
+
+ if (flags & IO_SKIP_ENCRYPTION)
+ io_flag |= CL_ENCRYPTED;
+
+ iostate.io_completed = 0;
+ iostate.io_issued = 0;
+ iostate.io_error = 0;
+ iostate.io_wanted = 0;
+
+ lck_mtx_init(&iostate.io_mtxp, cl_mtx_grp, cl_mtx_attr);
+
+ mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
+ devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
+
+ if (devblocksize == 1) {
+ /*
+ * the AFP client advertises a devblocksize of 1
+ * however, its BLOCKMAP routine maps to physical
+ * blocks that are PAGE_SIZE in size...
+ * therefore we can't ask for I/Os that aren't page aligned
+ * or aren't multiples of PAGE_SIZE in size
+ * by setting devblocksize to PAGE_SIZE, we re-instate
+ * the old behavior we had before the mem_alignment_mask
+ * changes went in...
+ */
+ devblocksize = PAGE_SIZE;
+ }
+
+next_dwrite:
+ io_req_size = *write_length;
+ iov_base = uio_curriovbase(uio);
+
+ offset_in_file = (u_int32_t)uio->uio_offset & PAGE_MASK;
+ offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;
+
+ if (offset_in_file || offset_in_iovbase) {
+ /*
+ * one of the 2 important offsets is misaligned
+ * so fire an I/O through the cache for this entire vector
+ */
+ goto wait_for_dwrites;
+ }
+ if (iov_base & (devblocksize - 1)) {
+ /*
+ * the offset in memory must be on a device block boundary
+ * so that we can guarantee that we can generate an
+ * I/O that ends on a page boundary in cluster_io
+ */
+ goto wait_for_dwrites;
+ }
+
+ task_update_logical_writes(current_task(), (io_req_size & ~PAGE_MASK), TASK_WRITE_IMMEDIATE, vp);
+ while (io_req_size >= PAGE_SIZE && uio->uio_offset < newEOF && retval == 0) {
+ int throttle_type;
+
+ if ( (throttle_type = cluster_is_throttled(vp)) ) {
+ /*
+ * we're in the throttle window, at the very least
+ * we want to limit the size of the I/O we're about
+ * to issue
+ */
+ if ( (flags & IO_RETURN_ON_THROTTLE) && throttle_type == THROTTLE_NOW) {
+ /*
+ * we're in the throttle window and at least 1 I/O
+ * has already been issued by a throttleable thread
+ * in this window, so return with EAGAIN to indicate
+ * to the FS issuing the cluster_write call that it
+ * should now throttle after dropping any locks
+ */
+ throttle_info_update_by_mount(vp->v_mount);
+
+ io_throttled = TRUE;
+ goto wait_for_dwrites;
+ }
+ max_vector_size = THROTTLE_MAX_IOSIZE;
+ max_io_size = THROTTLE_MAX_IOSIZE;
+ } else {
+ max_vector_size = MAX_VECTOR_UPL_SIZE;
+ max_io_size = max_upl_size;
+ }
+
+ if (first_IO) {
+ cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0);
+ first_IO = 0;
+ }
+ io_size = io_req_size & ~PAGE_MASK;
+ iov_base = uio_curriovbase(uio);
+
+ if (io_size > max_io_size)
+ io_size = max_io_size;
+
+ if(useVectorUPL && (iov_base & PAGE_MASK)) {
+ /*
+ * We have an iov_base that's not page-aligned.
+ * Issue all I/O's that have been collected within
+ * this Vectored UPL.
+ */
+ if(vector_upl_index) {
+ retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
+ reset_vector_run_state();
+ }
+
+ /*
+ * After this point, if we are using the Vector UPL path and the base is
+ * not page-aligned then the UPL with that base will be the first in the vector UPL.
+ */
+ }
+
+ upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
+ upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
+ (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
+
+ vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
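+ /*
+ * create a UPL describing (and wiring) the user pages backing this
+ * I/O... retry up to 3 times, bumping force_data_sync on each pass,
+ * if the pagelist we get back contains pages that aren't valid
+ */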
+ for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
+ pages_in_pl = 0;
+ upl_size = upl_needed_size;
+ upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
+ UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE
+ | UPL_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE);
+
+ kret = vm_map_get_upl(map,
+ (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
+ &upl_size,
+ &upl,
+ NULL,
+ &pages_in_pl,
+ &upl_flags,
+ force_data_sync);
+
+ if (kret != KERN_SUCCESS) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
+ 0, 0, 0, kret, 0);
+ /*
+ * failed to get pagelist
+ *
+ * we may have already spun some portion of this request
+ * off as async requests... we need to wait for the I/O
+ * to complete before returning
+ */
+ goto wait_for_dwrites;
+ }
+ pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
+ pages_in_pl = upl_size / PAGE_SIZE;
+
+ for (i = 0; i < pages_in_pl; i++) {
+ if (!upl_valid_page(pl, i))
+ break;
+ }
+ if (i == pages_in_pl)
+ break;
+
+ /*
+ * didn't get all the pages back that we
+ * needed... release this upl and try again
+ */
+ ubc_upl_abort(upl, 0);
+ }
+ if (force_data_sync >= 3) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
+ i, pages_in_pl, upl_size, kret, 0);
+ /*
+ * for some reason, we couldn't acquire a hold on all
+ * the pages needed in the user's address space
+ *
+ * we may have already spun some portion of this request
+ * off as async requests... we need to wait for the I/O
+ * to complete before returning
+ */
+ goto wait_for_dwrites;
+ }
+
+ /*
+ * Consider the possibility that upl_size wasn't satisfied.
+ */
+ if (upl_size < upl_needed_size) {
+ if (upl_size && upl_offset == 0)
+ io_size = upl_size;
+ else
+ io_size = 0;
+ }
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
+ (int)upl_offset, upl_size, (int)iov_base, io_size, 0);
+
+ if (io_size == 0) {
+ ubc_upl_abort(upl, 0);
+ /*
+ * we may have already spun some portion of this request
+ * off as async requests... we need to wait for the I/O
+ * to complete before returning
+ */
+ goto wait_for_dwrites;
+ }
+
+ if(useVectorUPL) {
+ vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
+ if(end_off)
+ issueVectorUPL = 1;
+ /*
+ * After this point, if we are using a vector UPL, then
+ * either all the UPL elements end on a page boundary OR
+ * this UPL is the last element because it does not end
+ * on a page boundary.
+ */
+ }
+
+ /*
+ * we want to push out these writes asynchronously so that we can overlap
+ * the preparation of the next I/O
+ * if there are already too many outstanding writes
+ * wait until some complete before issuing the next
+ */
+ if (vp->v_mount->mnt_minsaturationbytecount)
+ bytes_outstanding_limit = vp->v_mount->mnt_minsaturationbytecount;
+ else
+ bytes_outstanding_limit = max_upl_size * IO_SCALE(vp, 2);
+
+ cluster_iostate_wait(&iostate, bytes_outstanding_limit, "cluster_write_direct");
+
+ if (iostate.io_error) {
+ /*
+ * one of the earlier writes we issued ran into a hard error
+ * don't issue any more writes, cleanup the UPL
+ * that was just created but not used, then
+ * go wait for all writes that are part of this stream
+ * to complete before returning the error to the caller
+ */
+ ubc_upl_abort(upl, 0);
+
+ goto wait_for_dwrites;
+ }
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
+ (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);
+
+ if(!useVectorUPL)
+ retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
+ io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
+
+ else {
+ if(!vector_upl_index) {
+ vector_upl = vector_upl_create(upl_offset);
+ v_upl_uio_offset = uio->uio_offset;
+ vector_upl_offset = upl_offset;
+ }
+
+ vector_upl_set_subupl(vector_upl,upl,upl_size);
+ vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
+ vector_upl_index++;
+ vector_upl_iosize += io_size;
+ vector_upl_size += upl_size;
+
+ if(issueVectorUPL || vector_upl_index == MAX_VECTOR_UPL_ELEMENTS || vector_upl_size >= max_vector_size) {
+ retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
+ reset_vector_run_state();
+ }
+ }
+
+ /*
+ * update the uio structure to
+ * reflect the I/O that we just issued
+ */
+ uio_update(uio, (user_size_t)io_size);
+
+ /*
+ * in case we end up calling through to cluster_write_copy to finish
+ * the tail of this request, we need to update the oldEOF so that we
+ * don't zero-fill the head of a page if we've successfully written
+ * data to that area... 'cluster_write_copy' will zero-fill the head of a
+ * page that is beyond the oldEOF if the write is unaligned... we only
+ * want that to happen for the very first page of the cluster_write,
+ * NOT the first page of each vector making up a multi-vector write.
+ */
+ if (uio->uio_offset > oldEOF)
+ oldEOF = uio->uio_offset;
+
+ io_req_size -= io_size;
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
+ (int)upl_offset, (int)uio->uio_offset, io_req_size, retval, 0);
+
+ } /* end while */
+
+ if (retval == 0 && iostate.io_error == 0 && io_req_size == 0) {
+
+ retval = cluster_io_type(uio, write_type, write_length, MIN_DIRECT_WRITE_SIZE);
+
+ if (retval == 0 && *write_type == IO_DIRECT) {
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_NONE,
+ (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
+
+ goto next_dwrite;
+ }
+ }
+
+wait_for_dwrites:
+
+ if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
+ retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
+ reset_vector_run_state();
+ }
+ /*
+ * make sure all async writes issued as part of this stream
+ * have completed before we return
+ */
+ cluster_iostate_wait(&iostate, 0, "cluster_write_direct");
+
+ if (iostate.io_error)
+ retval = iostate.io_error;
+
+ lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp);
+
+ if (io_throttled == TRUE && retval == 0)
+ retval = EAGAIN;
+
+ if (io_req_size && retval == 0) {
+ /*
+ * we couldn't handle the tail of this request in DIRECT mode
+ * so fire it through the copy path
+ *
+ * note that flags will never have IO_HEADZEROFILL or IO_TAILZEROFILL set
+ * so we can just pass 0 in for the headOff and tailOff
+ */
+ if (uio->uio_offset > oldEOF)
+ oldEOF = uio->uio_offset;
+
+ retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg);
+
+ *write_type = IO_UNKNOWN;
+ }
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
+ (int)uio->uio_offset, io_req_size, retval, 4, 0);
+
+ return (retval);
+}
+
+
+static int
+cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, int *write_type, u_int32_t *write_length,
+ int (*callback)(buf_t, void *), void *callback_arg, int bflag)
+{
+ upl_page_info_t *pl;
+ addr64_t src_paddr = 0;
+ upl_t upl[MAX_VECTS];
+ vm_offset_t upl_offset;
+ u_int32_t tail_size = 0;
+ u_int32_t io_size;
+ u_int32_t xsize;
+ upl_size_t upl_size;
+ vm_size_t upl_needed_size;
+ mach_msg_type_number_t pages_in_pl;
+ upl_control_flags_t upl_flags;
+ kern_return_t kret;
+ struct clios iostate;
+ int error = 0;
+ int cur_upl = 0;
+ int num_upl = 0;
+ int n;
+ user_addr_t iov_base;
+ u_int32_t devblocksize;
+ u_int32_t mem_alignment_mask;
+
+ /*
+ * When we enter this routine, we know
+ * -- the io_req_size will not exceed iov_len
+ * -- the target address is physically contiguous
+ */
+ cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0);
+
+ devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
+ mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
+
+ iostate.io_completed = 0;
+ iostate.io_issued = 0;
+ iostate.io_error = 0;
+ iostate.io_wanted = 0;
+
+ lck_mtx_init(&iostate.io_mtxp, cl_mtx_grp, cl_mtx_attr);
+
+next_cwrite:
+ io_size = *write_length;
+
+ iov_base = uio_curriovbase(uio);
+
+ upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
+ upl_needed_size = upl_offset + io_size;
+
+ pages_in_pl = 0;
+ upl_size = upl_needed_size;
+ upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
+ UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE
+ | UPL_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE);
+
+ vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
+ kret = vm_map_get_upl(map,
+ (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
+ &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);
+
+ if (kret != KERN_SUCCESS) {
+ /*
+ * failed to get pagelist
+ */
+ error = EINVAL;
+ goto wait_for_cwrites;
+ }
+ num_upl++;
+
+ /*
+ * Consider the possibility that upl_size wasn't satisfied.
+ */
+ if (upl_size < upl_needed_size) {
+ /*
+ * This is a failure in the physical memory case.
+ */
+ error = EINVAL;
+ goto wait_for_cwrites;
+ }
+ pl = ubc_upl_pageinfo(upl[cur_upl]);
+
+ src_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)upl_offset;
+
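+ /*
+ * if the file offset isn't aligned on a device block boundary, use
+ * cluster_align_phys_io to transfer the misaligned head a device
+ * block (or less) at a time
+ */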
+ while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
+ u_int32_t head_size;
+
+ head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));
+
+ if (head_size > io_size)
+ head_size = io_size;
+
+ error = cluster_align_phys_io(vp, uio, src_paddr, head_size, 0, callback, callback_arg);
+
+ if (error)
+ goto wait_for_cwrites;
+
+ upl_offset += head_size;
+ src_paddr += head_size;
+ io_size -= head_size;
+
+ iov_base += head_size;
+ }
+ if ((u_int32_t)iov_base & mem_alignment_mask) {
+ /*
+ * the request isn't aligned on a memory boundary
+ * that the underlying DMA engine can handle...
+ * return an error instead of going through
+ * the slow copy path since the intent of this
+ * path is direct I/O from device memory
+ */
+ error = EINVAL;
+ goto wait_for_cwrites;
+ }
+
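+ /*
+ * set aside any tail that isn't a multiple of the device block size...
+ * it will be transferred via cluster_align_phys_io once the main
+ * I/O has completed
+ */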
+ tail_size = io_size & (devblocksize - 1);
+ io_size -= tail_size;
+
+ while (io_size && error == 0) {
+
+ if (io_size > MAX_IO_CONTIG_SIZE)
+ xsize = MAX_IO_CONTIG_SIZE;
+ else
+ xsize = io_size;
+ /*
+ * request asynchronously so that we can overlap
+ * the preparation of the next I/O... we'll do
+ * the commit after all the I/O has completed
+ * since it's all issued against the same UPL
+ * if there are already too many outstanding writes
+ * wait until some have completed before issuing the next
+ */
+ cluster_iostate_wait(&iostate, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), "cluster_write_contig");
+
+ if (iostate.io_error) {
+ /*
+ * one of the earlier writes we issued ran into a hard error
+ * don't issue any more writes...
+ * go wait for all writes that are part of this stream
+ * to complete before returning the error to the caller
+ */
+ goto wait_for_cwrites;
+ }
+ /*
+ * issue an asynchronous write to cluster_io
+ */
+ error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset,
+ xsize, CL_DEV_MEMORY | CL_ASYNC | bflag, (buf_t)NULL, (struct clios *)&iostate, callback, callback_arg);
+
+ if (error == 0) {
+ /*
+ * The cluster_io write completed successfully,
+ * update the uio structure
+ */
+ uio_update(uio, (user_size_t)xsize);
+
+ upl_offset += xsize;
+ src_paddr += xsize;
+ io_size -= xsize;
+ }
+ }
+ if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS) {
+
+ error = cluster_io_type(uio, write_type, write_length, 0);
+
+ if (error == 0 && *write_type == IO_CONTIG) {
+ cur_upl++;
+ goto next_cwrite;
+ }
+ } else
+ *write_type = IO_UNKNOWN;
+
+wait_for_cwrites:
+ /*
+ * make sure all async writes that are part of this stream
+ * have completed before we proceed
+ */
+ cluster_iostate_wait(&iostate, 0, "cluster_write_contig");
+
+ if (iostate.io_error)
+ error = iostate.io_error;
+
+ lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp);
+
+ if (error == 0 && tail_size)
+ error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0, callback, callback_arg);
+
+ for (n = 0; n < num_upl; n++)
+ /*
+ * just release our hold on each physically contiguous
+ * region without changing any state
+ */
+ ubc_upl_abort(upl[n], 0);
+
+ return (error);
+}
+
+
+/*
+ * need to avoid a race between an msync of a range of pages dirtied via mmap
+ * vs a filesystem such as HFS deciding to write a 'hole' to disk via cluster_write's
+ * zerofill mechanism before it has seen the VNOP_PAGEOUTs for the pages being msync'd
+ *
+ * we should never force-zero-fill pages that are already valid in the cache...
+ * the entire page contains valid data (either from disk, zero-filled or dirtied
+ * via an mmap) so we can only do damage by trying to zero-fill
+ *
+ */
+static int
+cluster_zero_range(upl_t upl, upl_page_info_t *pl, int flags, int io_offset, off_t zero_off, off_t upl_f_offset, int bytes_to_zero)
+{
+ int zero_pg_index;
+ boolean_t need_cluster_zero = TRUE;
+
+ if ((flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {
+
+ bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64));
+ zero_pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64);
+
+ if (upl_valid_page(pl, zero_pg_index)) {
+ /*
+ * never force zero valid pages - dirty or clean
+ * we'll leave these in the UPL for cluster_write_copy to deal with
+ */
+ need_cluster_zero = FALSE;
+ }
+ }
+ if (need_cluster_zero == TRUE)
+ cluster_zero(upl, io_offset, bytes_to_zero, NULL);
+
+ return (bytes_to_zero);
+}
+
+
+static int
+cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF, off_t headOff,
+ off_t tailOff, int flags, int (*callback)(buf_t, void *), void *callback_arg)
+{
+ upl_page_info_t *pl;
+ upl_t upl;
+ vm_offset_t upl_offset = 0;
+ vm_size_t upl_size;
+ off_t upl_f_offset;
+ int pages_in_upl;
+ int start_offset;
+ int xfer_resid;
+ int io_size;
+ int io_offset;
+ int bytes_to_zero;
+ int bytes_to_move;
+ kern_return_t kret;
+ int retval = 0;
+ int io_resid;
+ long long total_size;
+ long long zero_cnt;
+ off_t zero_off;
+ long long zero_cnt1;
+ off_t zero_off1;
+ off_t write_off = 0;
+ int write_cnt = 0;
+ boolean_t first_pass = FALSE;
+ struct cl_extent cl;
+ struct cl_writebehind *wbp;
+ int bflag;
+ u_int max_cluster_pgcount;
+ u_int max_io_size;
+
+ if (uio) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
+ (int)uio->uio_offset, io_req_size, (int)oldEOF, (int)newEOF, 0);
+
+ io_resid = io_req_size;
+ } else {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
+ 0, 0, (int)oldEOF, (int)newEOF, 0);
+
+ io_resid = 0;
+ }
+ if (flags & IO_PASSIVE)
+ bflag = CL_PASSIVE;
+ else
+ bflag = 0;
+ if (flags & IO_NOCACHE)
+ bflag |= CL_NOCACHE;
+
+ if (flags & IO_SKIP_ENCRYPTION)
+ bflag |= CL_ENCRYPTED;
+
+ zero_cnt = 0;
+ zero_cnt1 = 0;
+ zero_off = 0;
+ zero_off1 = 0;
+
+ max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
+ max_io_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
+
+ if (flags & IO_HEADZEROFILL) {
+ /*
+ * some filesystems (HFS is one) don't support unallocated holes within a file...
+ * so we zero fill the intervening space between the old EOF and the offset
+ * where the next chunk of real data begins.... ftruncate will also use this
+ * routine to zero fill to the new EOF when growing a file... in this case, the
+ * uio structure will not be provided
+ */
+ if (uio) {
+ if (headOff < uio->uio_offset) {
+ zero_cnt = uio->uio_offset - headOff;
+ zero_off = headOff;
+ }
+ } else if (headOff < newEOF) {
+ zero_cnt = newEOF - headOff;
+ zero_off = headOff;
+ }
+ } else {
+ if (uio && uio->uio_offset > oldEOF) {
+ zero_off = uio->uio_offset & ~PAGE_MASK_64;
+
+ if (zero_off >= oldEOF) {
+ zero_cnt = uio->uio_offset - zero_off;
+
+ flags |= IO_HEADZEROFILL;
+ }
+ }
+ }
+ if (flags & IO_TAILZEROFILL) {
+ if (uio) {
+ zero_off1 = uio->uio_offset + io_req_size;
+
+ if (zero_off1 < tailOff)
+ zero_cnt1 = tailOff - zero_off1;
+ }
+ } else {
+ if (uio && newEOF > oldEOF) {
+ zero_off1 = uio->uio_offset + io_req_size;
+
+ if (zero_off1 == newEOF && (zero_off1 & PAGE_MASK_64)) {
+ zero_cnt1 = PAGE_SIZE_64 - (zero_off1 & PAGE_MASK_64);
+
+ flags |= IO_TAILZEROFILL;
+ }
+ }
+ }
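+ /*
+ * no zero-fill work to do and no user data to copy... we're done
+ */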
+ if (zero_cnt == 0 && uio == (struct uio *) 0) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
+ retval, 0, 0, 0, 0);
+ return (0);
+ }
+ if (uio) {
+ write_off = uio->uio_offset;
+ write_cnt = uio_resid(uio);
+ /*
+ * delay updating the sequential write info
+ * in the control block until we've obtained
+ * the lock for it
+ */
+ first_pass = TRUE;
+ }
+ while ((total_size = (io_resid + zero_cnt + zero_cnt1)) && retval == 0) {
+ /*
+ * for this iteration of the loop, figure out where our starting point is
+ */
+ if (zero_cnt) {
+ start_offset = (int)(zero_off & PAGE_MASK_64);
+ upl_f_offset = zero_off - start_offset;
+ } else if (io_resid) {
+ start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
+ upl_f_offset = uio->uio_offset - start_offset;
+ } else {
+ start_offset = (int)(zero_off1 & PAGE_MASK_64);
+ upl_f_offset = zero_off1 - start_offset;
+ }
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 46)) | DBG_FUNC_NONE,
+ (int)zero_off, (int)zero_cnt, (int)zero_off1, (int)zero_cnt1, 0);
+
+ if (total_size > max_io_size)
+ total_size = max_io_size;
+
+ cl.b_addr = (daddr64_t)(upl_f_offset / PAGE_SIZE_64);
+
+ if (uio && ((flags & (IO_SYNC | IO_HEADZEROFILL | IO_TAILZEROFILL)) == 0)) {
+ /*
+ * assumption... total_size <= io_resid
+ * because IO_HEADZEROFILL and IO_TAILZEROFILL are not set
+ */
+ if ((start_offset + total_size) > max_io_size)
+ total_size = max_io_size - start_offset;
+ xfer_resid = total_size;
+
+ retval = cluster_copy_ubc_data_internal(vp, uio, &xfer_resid, 1, 1);
+
+ if (retval)
+ break;
+
+ io_resid -= (total_size - xfer_resid);
+ total_size = xfer_resid;
+ start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
+ upl_f_offset = uio->uio_offset - start_offset;
+
+ if (total_size == 0) {
+ if (start_offset) {
+ /*
+ * the write did not finish on a page boundary
+ * which will leave upl_f_offset pointing to the
+ * beginning of the last page written instead of
+ * the page beyond it... bump it in this case
+ * so that the cluster code records the last page
+ * written as dirty
+ */
+ upl_f_offset += PAGE_SIZE_64;
+ }
+ upl_size = 0;
+
+ goto check_cluster;
+ }
+ }
+ /*
+ * compute the size of the upl needed to encompass
+ * the requested write... limit each call to cluster_io
+ * to the maximum UPL size... cluster_io will clip if
+ * this exceeds the maximum io_size for the device,
+ * make sure to account for
+ * a starting offset that's not page aligned
+ */
+ upl_size = (start_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
+
+ if (upl_size > max_io_size)
+ upl_size = max_io_size;
+
+ pages_in_upl = upl_size / PAGE_SIZE;
+ io_size = upl_size - start_offset;
+
+ if ((long long)io_size > total_size)
+ io_size = total_size;
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0);
+
+
+ /*
+ * Gather the pages from the buffer cache.
+ * The UPL_WILL_MODIFY flag lets the UPL subsystem know
+ * that we intend to modify these pages.
+ */
+ kret = ubc_create_upl(vp,
+ upl_f_offset,
+ upl_size,
+ &upl,
+ &pl,
+ UPL_SET_LITE | (( uio!=NULL && (uio->uio_flags & UIO_FLAGS_IS_COMPRESSED_FILE)) ? 0 : UPL_WILL_MODIFY));
+ if (kret != KERN_SUCCESS)
+ panic("cluster_write_copy: failed to get pagelist");
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END,
+ upl, (int)upl_f_offset, start_offset, 0, 0);
+
+ if (start_offset && upl_f_offset < oldEOF && !upl_valid_page(pl, 0)) {
+ int read_size;
+
+ /*
+ * we're starting in the middle of the first page of the upl
+ * and the page isn't currently valid, so we're going to have
+ * to read it in first... this is a synchronous operation
+ */
+ read_size = PAGE_SIZE;
+
+ if ((upl_f_offset + read_size) > oldEOF)
+ read_size = oldEOF - upl_f_offset;
+
+ retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
+ CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
+ if (retval) {
+ /*
+ * we had an error during the read which causes us to abort
+ * the current cluster_write request... before we do, we need
+ * to release the rest of the pages in the upl without modifying
+ * their state and mark the failed page in error
+ */
+ ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY);
+
+ if (upl_size > PAGE_SIZE)
+ ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
+ upl, 0, 0, retval, 0);
+ break;
+ }
+ }
+ if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) {
+ /*
+ * the last offset we're writing to in this upl does not end on a page
+ * boundary... if it's not beyond the old EOF, then we'll also need to
+ * pre-read this page in if it isn't already valid
+ */
+ upl_offset = upl_size - PAGE_SIZE;
+
+ if ((upl_f_offset + start_offset + io_size) < oldEOF &&
+ !upl_valid_page(pl, upl_offset / PAGE_SIZE)) {
+ int read_size;
+
+ read_size = PAGE_SIZE;
+
+ if ((off_t)(upl_f_offset + upl_offset + read_size) > oldEOF)
+ read_size = oldEOF - (upl_f_offset + upl_offset);
+
+ retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
+ CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
+ if (retval) {
+ /*
+ * we had an error during the read which causes us to abort
+ * the current cluster_write request... before we do, we
+ * need to release the rest of the pages in the upl without
+ * modifying their state and mark the failed page in error
+ */
+ ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY);
+
+ if (upl_size > PAGE_SIZE)
+ ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
+ upl, 0, 0, retval, 0);
+ break;
+ }
+ }
+ }
+ xfer_resid = io_size;
+ io_offset = start_offset;
+
+ while (zero_cnt && xfer_resid) {
+
+ if (zero_cnt < (long long)xfer_resid)
+ bytes_to_zero = zero_cnt;
+ else
+ bytes_to_zero = xfer_resid;
+
+ bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off, upl_f_offset, bytes_to_zero);
+
+ xfer_resid -= bytes_to_zero;
+ zero_cnt -= bytes_to_zero;
+ zero_off += bytes_to_zero;
+ io_offset += bytes_to_zero;
+ }
+ if (xfer_resid && io_resid) {
+ u_int32_t io_requested;
+
+ bytes_to_move = min(io_resid, xfer_resid);
+ io_requested = bytes_to_move;
+
+ retval = cluster_copy_upl_data(uio, upl, io_offset, (int *)&io_requested);
+
+ if (retval) {
+ ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
+ upl, 0, 0, retval, 0);
+ } else {
+ io_resid -= bytes_to_move;
+ xfer_resid -= bytes_to_move;
+ io_offset += bytes_to_move;
+ }
+ }
+ while (xfer_resid && zero_cnt1 && retval == 0) {
+
+ if (zero_cnt1 < (long long)xfer_resid)
+ bytes_to_zero = zero_cnt1;
+ else
+ bytes_to_zero = xfer_resid;
+
+ bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off1, upl_f_offset, bytes_to_zero);
+
+ xfer_resid -= bytes_to_zero;
+ zero_cnt1 -= bytes_to_zero;
+ zero_off1 += bytes_to_zero;
+ io_offset += bytes_to_zero;
+ }
+ if (retval == 0) {
+ int cl_index;
+ int ret_cluster_try_push;
+
+ io_size += start_offset;
+
+ if ((upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
+ /*
+ * if we're extending the file with this write
+ * we'll zero fill the rest of the page so that
+ * if the file gets extended again in such a way as to leave a
+ * hole starting at this EOF, we'll have zeros in the correct spot
+ */
+ cluster_zero(upl, io_size, upl_size - io_size, NULL);
+ }
+ /*
+ * release the upl now if we hold one since...
+ * 1) pages in it may be present in the sparse cluster map
+ * and may span 2 separate buckets there... if they do and
+ * we happen to have to flush a bucket to make room and it intersects
+ * this upl, a deadlock may result on page BUSY
+ * 2) we're delaying the I/O... from this point forward we're just updating
+ * the cluster state... no need to hold the pages, so commit them
+ * 3) IO_SYNC is set...
+ * because we had to ask for a UPL that provides currently non-present pages, the
+ * UPL has been automatically set to clear the dirty flags (both software and hardware)
+ * upon committing it... this is not the behavior we want since it's possible for
+ * pages currently present as part of a mapped file to be dirtied while the I/O is in flight.
+ * we'll pick these pages back up later with the correct behavior specified.
+ * 4) we don't want to hold pages busy in a UPL and then block on the cluster lock... if a flush
+ * of this vnode is in progress, we will deadlock if the pages being flushed intersect the pages
+ * we hold since the flushing context is holding the cluster lock.
+ */
+ ubc_upl_commit_range(upl, 0, upl_size,
+ UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
+check_cluster:
+ /*
+ * calculate the last logical block number
+ * that this delayed I/O encompassed
+ */
+ cl.e_addr = (daddr64_t)((upl_f_offset + (off_t)upl_size) / PAGE_SIZE_64);
+
+ if (flags & IO_SYNC) {
+ /*
+ * if the IO_SYNC flag is set then we need to
+ * bypass any clusters and immediately issue
+ * the I/O
+ */
+ goto issue_io;
+ }
+ /*
+ * take the lock to protect our accesses
+ * of the writebehind and sparse cluster state
+ */
+ wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);
+
+ if (wbp->cl_scmap) {
+
+ if ( !(flags & IO_NOCACHE)) {
+ /*
+ * we've fallen into the sparse
+ * cluster method of delaying dirty pages
+ */
+ sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg);
+
+ lck_mtx_unlock(&wbp->cl_lockw);
+
+ continue;
+ }
+ /*
+ * must have done cached writes that fell into
+ * the sparse cluster mechanism... we've switched
+ * to uncached writes on the file, so go ahead
+ * and push whatever's in the sparse map
+ * and switch back to normal clustering
+ */
+ wbp->cl_number = 0;
+
+ sparse_cluster_push(&(wbp->cl_scmap), vp, newEOF, PUSH_ALL, 0, callback, callback_arg);
+ /*
+ * no clusters of either type present at this point
+ * so just go directly to start_new_cluster since
+ * we know we need to delay this I/O since we've
+ * already released the pages back into the cache
+ * to avoid the deadlock with sparse_cluster_push
+ */
+ goto start_new_cluster;
+ }
+ if (first_pass) {
+ if (write_off == wbp->cl_last_write)
+ wbp->cl_seq_written += write_cnt;
+ else
+ wbp->cl_seq_written = write_cnt;
+
+ wbp->cl_last_write = write_off + write_cnt;
+
+ first_pass = FALSE;
+ }
+ if (wbp->cl_number == 0)
+ /*
+ * no clusters currently present
+ */
+ goto start_new_cluster;
+
+ for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
+ /*
+ * check each cluster that we currently hold
+ * try to merge some or all of this write into
+ * one or more of the existing clusters... if
+ * any portion of the write remains, start a
+ * new cluster
+ */
+ if (cl.b_addr >= wbp->cl_clusters[cl_index].b_addr) {
+ /*
+ * the current write starts at or after the current cluster
+ */
+ if (cl.e_addr <= (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
+ /*
+ * we have a write that fits entirely
+ * within the existing cluster limits
+ */
+ if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr)
+ /*
+ * update our idea of where the cluster ends
+ */
+ wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
+ break;
+ }
+ if (cl.b_addr < (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
+ /*
+ * we have a write that starts in the middle of the current cluster
+ * but extends beyond the cluster's limit... we know this because
+ * of the previous checks
+ * we'll extend the current cluster to the max
+ * and update the b_addr for the current write to reflect that
+ * the head of it was absorbed into this cluster...
+ * note that we'll always have a leftover tail in this case since
+ * full absorption would have occurred in the clause above
+ */
+ wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount;
+
+ cl.b_addr = wbp->cl_clusters[cl_index].e_addr;
+ }
+ /*
+ * we come here for the case where the current write starts
+ * beyond the limit of the existing cluster or we have a leftover
+ * tail after a partial absorption
+ *
+ * in either case, we'll check the remaining clusters before
+ * starting a new one
+ */
+ } else {
+ /*
+ * the current write starts in front of the cluster we're currently considering
+ */
+ if ((wbp->cl_clusters[cl_index].e_addr - cl.b_addr) <= max_cluster_pgcount) {
+ /*
+ * we can just merge the new request into
+ * this cluster and leave it in the cache
+ * since the resulting cluster is still
+ * less than the maximum allowable size
+ */
+ wbp->cl_clusters[cl_index].b_addr = cl.b_addr;
+
+ if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr) {
+ /*
+ * the current write completely
+ * envelops the existing cluster and since
+ * each write is limited to at most max_cluster_pgcount pages
+ * we can just use the start and last blocknos of the write
+ * to generate the cluster limits
+ */
+ wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
+ }
+ break;
+ }
+
+ /*