]> git.saurik.com Git - apple/xnu.git/blob - bsd/ufs/ufs/ufs_readwrite.c
42cd1380119bd6acea688ca379701db6f396746c
[apple/xnu.git] / bsd / ufs / ufs / ufs_readwrite.c
1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
26 /*-
27 * Copyright (c) 1993
28 * The Regents of the University of California. All rights reserved.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
32 * are met:
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
38 * 3. All advertising materials mentioning features or use of this software
39 * must display the following acknowledgement:
40 * This product includes software developed by the University of
41 * California, Berkeley and its contributors.
42 * 4. Neither the name of the University nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE.
57 *
58 * @(#)ufs_readwrite.c 8.11 (Berkeley) 5/8/95
59 */
60
/*
 * FFS specialization hooks for the shared UFS read/write code.
 *
 * NOTE(review): this file appears to be textually incorporated into the
 * FFS vnops build (historical UFS/FFS layering) -- these macros bind the
 * generic code below to the FFS on-disk superblock type and the FFS
 * pagein/pageout entry-point names.  Confirm against the including file.
 */
#define	BLKSIZE(a, b, c)	blksize(a, b, c)	/* size of logical block c of inode b */
#define	FS			struct fs		/* on-disk filesystem (superblock) type */
#define	I_FS			i_fs			/* inode field holding the FS pointer */
#define	PGRD			ffs_pgrd		/* pagein entry-point symbol */
#define	PGRD_S			"ffs_pgrd"		/* ... and its name for diagnostics */
#define	PGWR			ffs_pgwr		/* pageout entry-point symbol */
#define	PGWR_S			"ffs_pgwr"		/* ... and its name for diagnostics */
68
/*
 * Vnode op for reading.
 *
 * Two paths:
 *   - If the vnode has valid UBC info, hand the whole transfer to
 *     cluster_read(), bounded by the current inode size.
 *   - Otherwise walk the file a logical block at a time through the
 *     buffer cache, issuing a one-block read-ahead when the access
 *     pattern looks sequential, and uiomove() each piece to the caller.
 *
 * On return the inode is flagged IN_ACCESS so atime gets updated.
 * Returns 0 or an errno.
 */
/* ARGSUSED */
ffs_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp;
	register struct inode *ip;
	register struct uio *uio;
	register FS *fs;
	struct buf *bp = (struct buf *)0;
	ufs_daddr_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int devBlockSize=0;
	int error;
	u_short mode;
#if REV_ENDIAN_FS
	int rev_endian=0;
#endif /* REV_ENDIAN_FS */

	vp = ap->a_vp;
	ip = VTOI(vp);
	mode = ip->i_mode;
	uio = ap->a_uio;

#if REV_ENDIAN_FS
	/* Mount of an opposite-endian volume: directory blocks need swapping. */
	rev_endian=(vp->v_mount->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: invalid uio_rw = %x", uio->uio_rw);

	if (vp->v_type == VLNK) {
		/* Short symlinks live in the inode itself and never reach here. */
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink = %d", ip->i_size);
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: invalid v_type = %x", vp->v_type);
#endif
	fs = ip->I_FS;
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_offset > fs->fs_maxfilesize)
		return (EFBIG);

	VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);

	if (UBCISVALID(vp)) {
		/* Unified buffer cache path: cluster layer does all the work. */
		error = cluster_read(vp, uio, (off_t)ip->i_size,
			devBlockSize, 0);
	} else {
		/* Buffer-cache path: one logical block per iteration. */
		for (error = 0, bp = NULL; uio->uio_resid > 0;
		    bp = NULL) {
			if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
				break;	/* at or past EOF */
			lbn = lblkno(fs, uio->uio_offset);
			nextlbn = lbn + 1;
			size = BLKSIZE(fs, ip, lbn);
			blkoffset = blkoff(fs, uio->uio_offset);
			/* Clamp the transfer to block end, request, and EOF. */
			xfersize = fs->fs_bsize - blkoffset;
			if (uio->uio_resid < xfersize)
				xfersize = uio->uio_resid;
			if (bytesinfile < xfersize)
				xfersize = bytesinfile;

			if (lblktosize(fs, nextlbn) >= ip->i_size)
				/* Last block of the file: no read-ahead possible. */
				error = bread(vp, lbn, size, NOCRED, &bp);
			else if (lbn - 1 == vp->v_lastr && !(vp->v_flag & VRAOFF)) {
				/* Sequential access detected: read ahead one block. */
				int nextsize = BLKSIZE(fs, ip, nextlbn);
				error = breadn(vp, lbn,
					size, &nextlbn, &nextsize, 1, NOCRED, &bp);
			} else
				error = bread(vp, lbn, size, NOCRED, &bp);
			if (error)
				break;
			vp->v_lastr = lbn;	/* remember for next sequentiality check */

			/*
			 * We should only get non-zero b_resid when an I/O error
			 * has occurred, which should cause us to break above.
			 * However, if the short read did not cause an error,
			 * then we want to ensure that we do not uiomove bad
			 * or uninitialized data.
			 */
			size -= bp->b_resid;
			if (size < xfersize) {
				if (size == 0)
					break;
				xfersize = size;
			}
#if REV_ENDIAN_FS
			/* Swap directory entries to host order before copyout... */
			if (rev_endian && S_ISDIR(mode)) {
				byte_swap_dir_block_in((char *)bp->b_data + blkoffset, xfersize);
			}
#endif /* REV_ENDIAN_FS */
			if (error =
			    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio)) {
#if REV_ENDIAN_FS
				/* NOTE(review): on uiomove failure this swaps the
				 * buffer again with byte_swap_dir_block_in -- presumably
				 * restoring on-disk order before release; confirm the
				 * two swap routines are inverses. */
				if (rev_endian && S_ISDIR(mode)) {
					byte_swap_dir_block_in((char *)bp->b_data + blkoffset, xfersize);
				}
#endif /* REV_ENDIAN_FS */
				break;
			}

#if REV_ENDIAN_FS
			/* ...and back to disk order so the cached buffer stays consistent. */
			if (rev_endian && S_ISDIR(mode)) {
				byte_swap_dir_out((char *)bp->b_data + blkoffset, xfersize);
			}
#endif /* REV_ENDIAN_FS */
			/* Fully consumed block (or EOF) won't be re-read soon: age it. */
			if (S_ISREG(mode) && (xfersize + blkoffset == fs->fs_bsize ||
			    uio->uio_offset == ip->i_size))
				bp->b_flags |= B_AGE;
			brelse(bp);
		}
	}
	if (bp != NULL)
		brelse(bp);	/* release buffer held at loop exit */
	ip->i_flag |= IN_ACCESS;
	return (error);
}
197
/*
 * Vnode op for writing.
 *
 * Validates the request (append mode, immutable-append flag, file size
 * limits, RLIMIT_FSIZE), then takes one of two paths:
 *
 *   UBC path: pre-allocate every block the write will touch via
 *   ffs_balloc(... B_NOBUFF ...) without reading them in, tracking how
 *   much of the first block precedes the write (fboff/fblk) and how much
 *   of the last allocated block follows it (blkalloc - xfersize), then
 *   let cluster_write() move the data, zero-filling the head and/or tail
 *   of freshly allocated blocks as flagged.  A mid-allocation failure is
 *   remembered (save_error/save_size) and the write proceeds with the
 *   portion that was successfully allocated.
 *
 *   Buffer-cache path: allocate + fill one logical block at a time with
 *   ffs_balloc()/uiomove(), writing synchronously for IO_SYNC,
 *   delayed-write otherwise.
 *
 * On success with data written: clears setuid/setgid for non-root
 * writers and posts a knote.  On error with IO_UNIT: truncates back to
 * the original size and restores the uio.  Returns 0 or an errno.
 */
ffs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp;
	register struct uio *uio;
	register struct inode *ip;
	register FS *fs;
	struct buf *bp;
	struct proc *p;
	ufs_daddr_t lbn;
	off_t osize;
	int blkoffset, flags, ioflag, resid, rsd, size, xfersize;
	int devBlockSize=0;
	int save_error=0, save_size=0;
	int blkalloc = 0;
	int error = 0;
	int file_extended = 0;
	int doingdirectory = 0;

#if REV_ENDIAN_FS
	int rev_endian=0;
#endif /* REV_ENDIAN_FS */

	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	vp = ap->a_vp;
	ip = VTOI(vp);
#if REV_ENDIAN_FS
	rev_endian=(vp->v_mount->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: uio_rw = %x\n", uio->uio_rw);
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		/* Append-only files may only be written at EOF. */
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		doingdirectory = 1;
		/* Directory metadata must always be written synchronously. */
		if ((ioflag & IO_SYNC) == 0)
			panic("ffs_write: nonsync dir write");
		break;
	default:
		panic("ffs_write: invalid v_type=%x", vp->v_type);
	}

	fs = ip->I_FS;
	if (uio->uio_offset < 0 ||
	    (u_int64_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	p = uio->uio_procp;
	if (vp->v_type == VREG && p &&
	    uio->uio_offset + uio->uio_resid >
	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);	/* exceeds per-process file size limit */
		return (EFBIG);
	}

	/* Snapshot request size and file size for error rollback below. */
	resid = uio->uio_resid;
	osize = ip->i_size;
	flags = 0;
	if ((ioflag & IO_SYNC) && !((vp)->v_mount->mnt_flag & MNT_ASYNC))
		flags = B_SYNC;

	if (UBCISVALID(vp)) {
		off_t filesize;		/* file size after this write completes */
		off_t endofwrite;	/* byte just past the last byte written */
		off_t local_offset;	/* allocation cursor */
		off_t head_offset;	/* start of head zero-fill region */
		int local_flags;
		int first_block;	/* still on the write's first block? */
		int fboff;		/* offset of the write within its first block */
		int fblk;		/* bytes newly allocated for the first block */
		int loopcount;		/* number of blocks processed */

		endofwrite = uio->uio_offset + uio->uio_resid;

		if (endofwrite > ip->i_size) {
			filesize = endofwrite;
			file_extended = 1;
		} else
			filesize = ip->i_size;

		head_offset = ip->i_size;

		/* Go ahead and allocate the block that are going to be written */
		rsd = uio->uio_resid;
		local_offset = uio->uio_offset;
		local_flags = 0;
		if ((ioflag & IO_SYNC) && !((vp)->v_mount->mnt_flag & MNT_ASYNC))
			local_flags = B_SYNC;
		local_flags |= B_NOBUFF;	/* allocate only; no buf returned */

		first_block = 1;
		fboff = 0;
		fblk = 0;
		loopcount = 0;

		for (error = 0; rsd > 0;) {
			blkalloc = 0;
			lbn = lblkno(fs, local_offset);
			blkoffset = blkoff(fs, local_offset);
			xfersize = fs->fs_bsize - blkoffset;
			if (first_block)
				fboff = blkoffset;
			if (rsd < xfersize)
				xfersize = rsd;
			/* Partial-block write: existing contents must be preserved. */
			if (fs->fs_bsize > xfersize)
				local_flags |= B_CLRBUF;
			else
				local_flags &= ~B_CLRBUF;

			/* Allocate block without reading into a buf */
			error = ffs_balloc(ip,
				lbn, blkoffset + xfersize, ap->a_cred,
				&bp, local_flags, &blkalloc);
			if (error)
				break;
			if (first_block) {
				fblk = blkalloc;
				first_block = 0;
			}
			loopcount++;

			rsd -= xfersize;
			local_offset += (off_t)xfersize;
			/* Grow i_size as we allocate so balloc sees the new EOF. */
			if (local_offset > ip->i_size)
				ip->i_size = local_offset;
		}

		if(error) {
			/* Allocation failed partway: write only what we got. */
			save_error = error;
			save_size = rsd;
			uio->uio_resid -= rsd;
			if (file_extended)
				filesize -= rsd;
		}

		flags = ioflag & IO_SYNC ? IO_SYNC : 0;
		/* flags |= IO_NOZEROVALID; */

		if((error == 0) && fblk && fboff) {
			if( fblk > fs->fs_bsize)
				panic("ffs_balloc : allocated more than bsize(head)");
			/* We need to zero out the head */
			head_offset = uio->uio_offset - (off_t)fboff ;
			flags |= IO_HEADZEROFILL;
			/* flags &= ~IO_NOZEROVALID; */
		}

		if((error == 0) && blkalloc && ((blkalloc - xfersize) > 0)) {
			/* We need to zero out the tail */
			if( blkalloc > fs->fs_bsize)
				panic("ffs_balloc : allocated more than bsize(tail)");
			/* local_offset becomes the end of the tail zero-fill region. */
			local_offset += (blkalloc - xfersize);
			if (loopcount == 1) {
				/* blkalloc is same as fblk; so no need to check again*/
				local_offset -= fboff;
			}
			flags |= IO_TAILZEROFILL;
			/* Freshly allocated block; bzero even if
			 * find a page
			 */
			/* flags &= ~IO_NOZEROVALID; */
		}
		/*
		 * if the write starts beyond the current EOF then
		 * we we'll zero fill from the current EOF to where the write begins
		 */

		error = cluster_write(vp, uio, osize, filesize, head_offset, local_offset, devBlockSize, flags);

		if (uio->uio_offset > osize) {
			/* Non-atomic write that failed: trim back what got written. */
			if (error && ((ioflag & IO_UNIT)==0))
				(void)VOP_TRUNCATE(vp, uio->uio_offset,
				    ioflag & IO_SYNC, ap->a_cred, uio->uio_procp);
			ip->i_size = uio->uio_offset;
			ubc_setsize(vp, (off_t)ip->i_size);
		}
		if(save_error) {
			/* Restore the unallocated remainder; report the alloc error. */
			uio->uio_resid += save_size;
			if(!error)
				error = save_error;
		}
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	} else {
		/* Non-UBC path: classic buffer-cache block-at-a-time write. */
		flags = 0;
		if ((ioflag & IO_SYNC) && !((vp)->v_mount->mnt_flag & MNT_ASYNC))
			flags = B_SYNC;

		for (error = 0; uio->uio_resid > 0;) {
			lbn = lblkno(fs, uio->uio_offset);
			blkoffset = blkoff(fs, uio->uio_offset);
			xfersize = fs->fs_bsize - blkoffset;
			if (uio->uio_resid < xfersize)
				xfersize = uio->uio_resid;

			/* Partial block: read/zero existing contents first. */
			if (fs->fs_bsize > xfersize)
				flags |= B_CLRBUF;
			else
				flags &= ~B_CLRBUF;

			error = ffs_balloc(ip,
			    lbn, blkoffset + xfersize, ap->a_cred, &bp, flags, 0);
			if (error)
				break;
			if (uio->uio_offset + xfersize > ip->i_size) {
				ip->i_size = uio->uio_offset + xfersize;

				if (UBCISVALID(vp))
					ubc_setsize(vp, (u_long)ip->i_size); /* XXX check errors */
			}

			/* Shorten the copy if the buffer came up short (see ffs_read). */
			size = BLKSIZE(fs, ip, lbn) - bp->b_resid;
			if (size < xfersize)
				xfersize = size;

			error =
			    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
#if REV_ENDIAN_FS
			/* Directory data goes back to on-disk byte order. */
			if (rev_endian && S_ISDIR(ip->i_mode)) {
				byte_swap_dir_out((char *)bp->b_data + blkoffset, xfersize);
			}
#endif /* REV_ENDIAN_FS */
			if (doingdirectory == 0 && (ioflag & IO_SYNC))
				(void)bwrite(bp);	/* synchronous write */
			else if (xfersize + blkoffset == fs->fs_bsize) {
				/* Block fully written; unlikely to be touched again. */
				bp->b_flags |= B_AGE;
				bdwrite(bp);
			}
			else
				bdwrite(bp);	/* delayed write */
			if (error || xfersize == 0)
				break;
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		}
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
		ip->i_mode &= ~(ISUID | ISGID);
	if (resid > uio->uio_resid)
		VN_KNOTE(vp, NOTE_WRITE | (file_extended ? NOTE_EXTEND : 0));
	if (error) {
		/* IO_UNIT: the write is all-or-nothing; undo everything. */
		if (ioflag & IO_UNIT) {
			(void)VOP_TRUNCATE(vp, osize,
			    ioflag & IO_SYNC, ap->a_cred, uio->uio_procp);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		/* Synchronous write: push the inode timestamps out now. */
		error = VOP_UPDATE(vp, (struct timeval *)&time,
		    (struct timeval *)&time, 1);
	return (error);
}
481
482 /*
483 * Vnode op for pagein.
484 * Similar to ffs_read()
485 */
486 /* ARGSUSED */
487 ffs_pagein(ap)
488 struct vop_pagein_args /* {
489 struct vnode *a_vp,
490 upl_t a_pl,
491 vm_offset_t a_pl_offset,
492 off_t a_f_offset,
493 size_t a_size,
494 struct ucred *a_cred,
495 int a_flags
496 } */ *ap;
497 {
498 register struct vnode *vp = ap->a_vp;
499 upl_t pl = ap->a_pl;
500 size_t size= ap->a_size;
501 off_t f_offset = ap->a_f_offset;
502 vm_offset_t pl_offset = ap->a_pl_offset;
503 int flags = ap->a_flags;
504 register struct inode *ip;
505 int devBlockSize=0;
506 int error;
507
508 ip = VTOI(vp);
509
510 /* check pageins for reg file only and ubc info is present*/
511 if (UBCINVALID(vp))
512 panic("ffs_pagein: Not a VREG: vp=%x", vp);
513 if (UBCINFOMISSING(vp))
514 panic("ffs_pagein: No mapping: vp=%x", vp);
515
516 #if DIAGNOSTIC
517 if (vp->v_type == VLNK) {
518 if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
519 panic("%s: short symlink", "ffs_pagein");
520 } else if (vp->v_type != VREG && vp->v_type != VDIR)
521 panic("%s: type %d", "ffs_pagein", vp->v_type);
522 #endif
523
524 VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);
525
526 error = cluster_pagein(vp, pl, pl_offset, f_offset, size,
527 (off_t)ip->i_size, devBlockSize, flags);
528 /* ip->i_flag |= IN_ACCESS; */
529 return (error);
530 }
531
/*
 * Vnode op for pageout.
 * Similar to ffs_write()
 * make sure the buf is not in hash queue when you return
 *
 * Validates the request (UBC-backed vnode, writable mount, offset
 * within the file), pre-allocates any needed blocks with
 * ffs_blkalloc(), then hands the page-aligned transfer to
 * cluster_pageout().  If block allocation fails partway, the pages
 * covering the unallocated tail are aborted (unless UPL_NOCOMMIT)
 * and the allocation error is reported once the partial pageout
 * completes.  Returns 0 or an errno.
 */
ffs_pageout(ap)
	struct vop_pageout_args /* {
	   struct vnode *a_vp,
	   upl_t        a_pl,
	   vm_offset_t   a_pl_offset,
	   off_t         a_f_offset,
	   size_t        a_size,
	   struct ucred *a_cred,
	   int           a_flags
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	upl_t pl = ap->a_pl;
	size_t size= ap->a_size;
	off_t f_offset = ap->a_f_offset;
	vm_offset_t pl_offset = ap->a_pl_offset;
	int flags = ap->a_flags;
	register struct inode *ip;
	register FS *fs;
	int error ;
	int devBlockSize=0;
	size_t xfer_size = 0;		/* bytes actually sent to cluster_pageout */
	int local_flags=0;
	off_t local_offset;		/* allocation cursor */
	int resid, blkoffset;
	size_t xsize, lsize;
	daddr_t lbn;
	int save_error =0, save_size=0;	/* allocation failure bookkeeping */
	vm_offset_t lupl_offset;
	int nocommit = flags & UPL_NOCOMMIT;	/* caller manages the UPL itself */
	struct buf *bp;

	ip = VTOI(vp);

	/* check pageouts for reg file only and ubc info is present*/
	if (UBCINVALID(vp))
		panic("ffs_pageout: Not a VREG: vp=%x", vp);
	if (UBCINFOMISSING(vp))
		panic("ffs_pageout: No mapping: vp=%x", vp);

	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
		/* Nothing can be written; drop the pages and fail. */
		if (!nocommit)
			ubc_upl_abort_range(pl, pl_offset, size,
				UPL_ABORT_FREE_ON_EMPTY);
		return (EROFS);
	}
	fs = ip->I_FS;

	if (f_offset < 0 || f_offset >= ip->i_size) {
		/* Pageout starting outside the file is a caller bug. */
		if (!nocommit)
			ubc_upl_abort_range(pl, pl_offset, size,
				UPL_ABORT_FREE_ON_EMPTY);
		return (EINVAL);
	}

	/*
	 * once we enable multi-page pageouts we will
	 * need to make sure we abort any pages in the upl
	 * that we don't issue an I/O for
	 */
	if (f_offset + size > ip->i_size)
		xfer_size = ip->i_size - f_offset;	/* clip to EOF */
	else
		xfer_size = size;

	VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);

	if (xfer_size & (PAGE_SIZE - 1)) {
		/* if not a multiple of page size
		 * then round up to be a multiple
		 * the physical disk block size
		 */
		xfer_size = (xfer_size + (devBlockSize - 1)) & ~(devBlockSize - 1);
	}

	/*
	 * once the block allocation is moved to ufs_cmap
	 * we can remove all the size and offset checks above
	 * cluster_pageout does all of this now
	 * we need to continue to do it here so as not to
	 * allocate blocks that aren't going to be used because
	 * of a bogus parameter being passed in
	 */
	local_flags = 0;
	resid = xfer_size;
	local_offset = f_offset;
	for (error = 0; resid > 0;) {
		lbn = lblkno(fs, local_offset);
		blkoffset = blkoff(fs, local_offset);
		xsize = fs->fs_bsize - blkoffset;
		if (resid < xsize)
			xsize = resid;
		/* Allocate block without reading into a buf */
		error = ffs_blkalloc(ip,
			lbn, blkoffset + xsize, ap->a_cred,
			local_flags);
		if (error)
			break;
		resid -= xsize;
		local_offset += (off_t)xsize;
	}

	if (error) {
		/* Allocation failed partway: page out only the allocated prefix. */
		save_size = resid;
		save_error = error;
		xfer_size -= save_size;
	}


	error = cluster_pageout(vp, pl, pl_offset, f_offset, round_page_32(xfer_size), ip->i_size, devBlockSize, flags);

	if(save_error) {
		/* Abort the UPL pages we never issued I/O for. */
		lupl_offset = size - save_size;
		resid = round_page_32(save_size);
		if (!nocommit)
			ubc_upl_abort_range(pl, lupl_offset, resid,
					UPL_ABORT_FREE_ON_EMPTY);
		if(!error)
			error= save_error;
	}
	return (error);
}