git.saurik.com Git - apple/xnu.git/blob - bsd/ufs/ufs/ufs_readwrite.c
xnu-1228.tar.gz
[apple/xnu.git] / bsd / ufs / ufs / ufs_readwrite.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)ufs_readwrite.c 8.11 (Berkeley) 5/8/95
62 */
63
64 #include <sys/buf_internal.h>
65 #include <sys/uio_internal.h>
66
67
/*
 * Indirection macros shared with the historical 4.4BSD ufs_readwrite.c,
 * which was compiled for both FFS and LFS; here they are pinned to FFS:
 * BLKSIZE maps to the FFS blksize() fragment-size calculation, FS is the
 * on-disk superblock type, and I_FS selects the inode's superblock field.
 */
#define BLKSIZE(a, b, c) blksize(a, b, c)
#define FS struct fs
#define I_FS i_fs
71
72
73
74 /*
75 * Vnode op for reading.
76 */
77 /* ARGSUSED */
78 ffs_read(ap)
79 struct vnop_read_args /* {
80 struct vnode *a_vp;
81 struct uio *a_uio;
82 int a_ioflag;
83 vfs_context_t a_context;
84 } */ *ap;
85 {
86 return(ffs_read_internal(ap->a_vp, ap->a_uio, ap->a_ioflag));
87 }
88
89
/*
 * Shared read implementation for ffs_read().
 *
 * vp     - vnode to read from (VREG, VDIR, or VLNK per the DIAGNOSTIC checks)
 * uio    - describes the destination buffers, offset, and residual count
 * ioflag - IO_* flags, passed through to cluster_read()
 *
 * Returns 0 on success or an errno (EINVAL for a negative offset,
 * EFBIG past fs_maxfilesize, or an I/O error from the buffer cache).
 *
 * If the vnode has UBC info, the whole transfer is delegated to
 * cluster_read(); otherwise the data is copied block-by-block through
 * the buffer cache with one-block read-ahead.
 */
int
ffs_read_internal(vnode_t vp, struct uio *uio, int ioflag)
{
	struct inode *ip;
	FS *fs;
	buf_t bp = (struct buf *)0;	/* NULL unless a buffer is held at loop exit */
	ufs_daddr_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error;
	u_short mode;
#if REV_ENDIAN_FS
	int rev_endian=0;
#endif /* REV_ENDIAN_FS */

	ip = VTOI(vp);
	mode = ip->i_mode;

#if REV_ENDIAN_FS
	/* Byte-swapped (opposite-endian) filesystem support. */
	rev_endian=(vp->v_mount->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: invalid uio_rw = %x", uio->uio_rw);

	if (vp->v_type == VLNK) {
		/* Short symlinks are stored in the inode, never read this way. */
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink = %d", ip->i_size);
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: invalid v_type = %x", vp->v_type);
#endif
	fs = ip->I_FS;
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_offset > fs->fs_maxfilesize)
		return (EFBIG);

	if (UBCINFOEXISTS(vp)) {
		/* Memory-object-backed vnode: let the cluster layer do it. */
		error = cluster_read(vp, uio, (off_t)ip->i_size, ioflag);
	} else {
		/* Buffer-cache path: copy out one filesystem block per pass. */
		for (error = 0, bp = NULL; uio_resid(uio) > 0;
		    bp = NULL) {
			char *buf_data;

			if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
				break;
			lbn = lblkno(fs, uio->uio_offset);
			nextlbn = lbn + 1;
			size = BLKSIZE(fs, ip, lbn);
			blkoffset = blkoff(fs, uio->uio_offset);
			/* Clamp the transfer to block end, request, and EOF. */
			xfersize = fs->fs_bsize - blkoffset;
			// LP64todo - fix this
			if (uio_resid(uio) < xfersize)
				xfersize = uio_resid(uio);
			if (bytesinfile < xfersize)
				xfersize = bytesinfile;

			if (lblktosize(fs, nextlbn) >= ip->i_size)
				/* Last block of the file: plain read, no read-ahead. */
				error = (int)buf_bread(vp, (daddr64_t)((unsigned)lbn), size, NOCRED, &bp);
			else if (lbn - 1 == ip->i_lastr && !(vp->v_flag & VRAOFF)) {
				/* Sequential access detected: read-ahead the next block. */
				int nextsize = BLKSIZE(fs, ip, nextlbn);
				error = (int)buf_breadn(vp, (daddr64_t)((unsigned)lbn),
					size, &nextlbn, &nextsize, 1, NOCRED, &bp);
			} else
				error = (int)buf_bread(vp, lbn, size, NOCRED, &bp);
			if (error)
				break;
			ip->i_lastr = lbn;	/* remember for sequential detection */

			/*
			 * We should only get non-zero buffer resid when an I/O error
			 * has occurred, which should cause us to break above.
			 * However, if the short read did not cause an error,
			 * then we want to ensure that we do not uiomove bad
			 * or uninitialized data.
			 */
			size -= buf_resid(bp);
			if (size < xfersize) {
				if (size == 0)
					break;
				xfersize = size;
			}
			buf_data = (char *)buf_dataptr(bp);
#if REV_ENDIAN_FS
			/* Swap directory entries to host order before copyout. */
			if (rev_endian && S_ISDIR(mode)) {
				byte_swap_dir_block_in(buf_data + blkoffset, xfersize);
			}
#endif /* REV_ENDIAN_FS */
			if (error =
			    uiomove(buf_data + blkoffset, (int)xfersize, uio)) {
#if REV_ENDIAN_FS
				/* Undo the swap so the cached buffer stays on-disk order. */
				if (rev_endian && S_ISDIR(mode)) {
					byte_swap_dir_block_in(buf_data + blkoffset, xfersize);
				}
#endif /* REV_ENDIAN_FS */
				break;
			}

#if REV_ENDIAN_FS
			/* Restore the cached copy to on-disk byte order. */
			if (rev_endian && S_ISDIR(mode)) {
				byte_swap_dir_out(buf_data + blkoffset, xfersize);
			}
#endif /* REV_ENDIAN_FS */
			/* Fully-consumed blocks of a regular file age out quickly. */
			if (S_ISREG(mode) && (xfersize + blkoffset == fs->fs_bsize ||
			    uio->uio_offset == ip->i_size))
				buf_markaged(bp);
			buf_brelse(bp);
		}
	}
	/* Release any buffer still held after an early break. */
	if (bp != NULL)
		buf_brelse(bp);
	if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0)
		ip->i_flag |= IN_ACCESS;
	return (error);
}
206
207 /*
208 * Vnode op for writing.
209 */
210 ffs_write(ap)
211 struct vnop_write_args /* {
212 struct vnode *a_vp;
213 struct uio *a_uio;
214 int a_ioflag;
215 vfs_context_t a_context;
216 } */ *ap;
217 {
218 return(ffs_write_internal(ap->a_vp, ap->a_uio, ap->a_ioflag, vfs_context_ucred(ap->a_context)));
219 }
220
221
222 ffs_write_internal(vnode_t vp, struct uio *uio, int ioflag, kauth_cred_t cred)
223 {
224 buf_t bp;
225 proc_t p;
226 struct inode *ip;
227 FS *fs;
228 ufs_daddr_t lbn;
229 off_t osize;
230 int blkoffset, flags, resid, rsd, size, xfersize;
231 int save_error=0, save_size=0;
232 int blkalloc = 0;
233 int error = 0;
234 int file_extended = 0;
235 int doingdirectory = 0;
236 user_ssize_t clippedsize = 0; /* Truncate writes near fs->fs_maxfilesize */
237 user_ssize_t residcount, oldcount;
238 int partialwrite=0;
239
240 #if REV_ENDIAN_FS
241 int rev_endian=0;
242 #endif /* REV_ENDIAN_FS */
243
244 ip = VTOI(vp);
245 #if REV_ENDIAN_FS
246 rev_endian=(vp->v_mount->mnt_flag & MNT_REVEND);
247 #endif /* REV_ENDIAN_FS */
248
249 #if DIAGNOSTIC
250 if (uio->uio_rw != UIO_WRITE)
251 panic("ffs_write: uio_rw = %x\n", uio->uio_rw);
252 #endif
253
254 switch (vp->v_type) {
255 case VREG:
256 if (ioflag & IO_APPEND)
257 uio->uio_offset = ip->i_size;
258 if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
259 return (EPERM);
260 /* FALLTHROUGH */
261 case VLNK:
262 break;
263 case VDIR:
264 doingdirectory = 1;
265 if ((ioflag & IO_SYNC) == 0)
266 panic("ffs_write: nonsync dir write");
267 break;
268 default:
269 panic("ffs_write: invalid v_type=%x", vp->v_type);
270 }
271
272 fs = ip->I_FS;
273 if (uio->uio_offset < 0)
274 return (EFBIG);
275 if ( uio_resid(uio) > fs->fs_maxfilesize - uio->uio_offset ) {
276 residcount = uio_resid(uio);
277 clippedsize = residcount - (fs->fs_maxfilesize - uio->uio_offset);
278 if (clippedsize >= residcount) {
279 return (EFBIG);
280 } else {
281 uio_setresid(uio, residcount - clippedsize);
282 partialwrite = 1;
283 }
284 }
285 if (uio_resid(uio) == 0)
286 return (0);
287
288 // LP64todo - fix this
289 resid = uio_resid(uio);
290 osize = ip->i_size;
291 flags = 0;
292 if ((ioflag & IO_SYNC) && !((vp)->v_mount->mnt_flag & MNT_ASYNC))
293 flags = B_SYNC;
294
295 if (UBCINFOEXISTS(vp)) {
296 off_t filesize;
297 off_t endofwrite;
298 off_t local_offset;
299 off_t head_offset;
300 int local_flags;
301 int first_block;
302 int fboff;
303 int fblk;
304 int loopcount;
305
306 // LP64todo - fix this
307 endofwrite = uio->uio_offset + uio_resid(uio);
308
309 if (endofwrite > ip->i_size) {
310 filesize = endofwrite;
311 file_extended = 1;
312 } else
313 filesize = ip->i_size;
314
315 head_offset = ip->i_size;
316
317 /* Go ahead and allocate the block that are going to be written */
318 // LP64todo - fix this
319 rsd = uio_resid(uio);
320 local_offset = uio->uio_offset;
321 local_flags = 0;
322 if ((ioflag & IO_SYNC) && !((vp)->v_mount->mnt_flag & MNT_ASYNC))
323 local_flags = B_SYNC;
324 local_flags |= B_NOBUFF;
325
326 first_block = 1;
327 fboff = 0;
328 fblk = 0;
329 loopcount = 0;
330
331 for (error = 0; rsd > 0;) {
332 blkalloc = 0;
333 lbn = lblkno(fs, local_offset);
334 blkoffset = blkoff(fs, local_offset);
335 xfersize = fs->fs_bsize - blkoffset;
336 if (first_block)
337 fboff = blkoffset;
338 if (rsd < xfersize)
339 xfersize = rsd;
340 if (fs->fs_bsize > xfersize)
341 local_flags |= B_CLRBUF;
342 else
343 local_flags &= ~B_CLRBUF;
344
345 /* Allocate block without reading into a buf */
346 error = ffs_balloc(ip,
347 lbn, blkoffset + xfersize, cred,
348 &bp, local_flags, &blkalloc);
349 if (error)
350 break;
351 if (first_block) {
352 fblk = blkalloc;
353 first_block = 0;
354 }
355 loopcount++;
356
357 rsd -= xfersize;
358 local_offset += (off_t)xfersize;
359 if (local_offset > ip->i_size)
360 ip->i_size = local_offset;
361 }
362
363 if(error) {
364 save_error = error;
365 save_size = rsd;
366 uio_setresid(uio, (uio_resid(uio) - rsd));
367 if (file_extended)
368 filesize -= rsd;
369 }
370
371 flags = ioflag & ~(IO_TAILZEROFILL | IO_HEADZEROFILL | IO_NOZEROVALID | IO_NOZERODIRTY);
372
373 if((error == 0) && fblk && fboff) {
374 if( fblk > fs->fs_bsize)
375 panic("ffs_balloc : allocated more than bsize(head)");
376 /* We need to zero out the head */
377 head_offset = uio->uio_offset - (off_t)fboff ;
378 flags |= IO_HEADZEROFILL;
379 }
380
381 if((error == 0) && blkalloc && ((blkalloc - xfersize) > 0)) {
382 /* We need to zero out the tail */
383 if( blkalloc > fs->fs_bsize)
384 panic("ffs_balloc : allocated more than bsize(tail)");
385 local_offset += (blkalloc - xfersize);
386 if (loopcount == 1) {
387 /* blkalloc is same as fblk; so no need to check again*/
388 local_offset -= fboff;
389 }
390 flags |= IO_TAILZEROFILL;
391 /* Freshly allocated block; bzero even if
392 * find a page
393 */
394 /* flags &= ~IO_NOZEROVALID; */
395 }
396 /*
397 * if the write starts beyond the current EOF then
398 * we we'll zero fill from the current EOF to where the write begins
399 */
400
401 error = cluster_write(vp, uio, osize, filesize, head_offset, local_offset, flags);
402
403 if (uio->uio_offset > osize) {
404 if (error && ((ioflag & IO_UNIT)==0))
405 (void)ffs_truncate_internal(vp, uio->uio_offset, ioflag & IO_SYNC, cred);
406 ip->i_size = uio->uio_offset;
407 ubc_setsize(vp, (off_t)ip->i_size);
408 }
409 if(save_error) {
410 uio_setresid(uio, (uio_resid(uio) + save_size));
411 if(!error)
412 error = save_error;
413 }
414 ip->i_flag |= IN_CHANGE | IN_UPDATE;
415 } else {
416 flags = 0;
417 if ((ioflag & IO_SYNC) && !((vp)->v_mount->mnt_flag & MNT_ASYNC))
418 flags = B_SYNC;
419
420 for (error = 0; uio_resid(uio) > 0;) {
421 char *buf_data;
422
423 lbn = lblkno(fs, uio->uio_offset);
424 blkoffset = blkoff(fs, uio->uio_offset);
425 xfersize = fs->fs_bsize - blkoffset;
426 if (uio_resid(uio) < xfersize)
427 // LP64todo - fix this
428 xfersize = uio_resid(uio);
429
430 if (fs->fs_bsize > xfersize)
431 flags |= B_CLRBUF;
432 else
433 flags &= ~B_CLRBUF;
434
435 error = ffs_balloc(ip, lbn, blkoffset + xfersize, cred, &bp, flags, 0);
436 if (error)
437 break;
438 if (uio->uio_offset + xfersize > ip->i_size) {
439 ip->i_size = uio->uio_offset + xfersize;
440 ubc_setsize(vp, (u_long)ip->i_size);
441 }
442
443 size = BLKSIZE(fs, ip, lbn) - buf_resid(bp);
444 if (size < xfersize)
445 xfersize = size;
446
447 buf_data = (char *)buf_dataptr(bp);
448
449 error = uiomove(buf_data + blkoffset, (int)xfersize, uio);
450 #if REV_ENDIAN_FS
451 if (rev_endian && S_ISDIR(ip->i_mode)) {
452 byte_swap_dir_out(buf_data + blkoffset, xfersize);
453 }
454 #endif /* REV_ENDIAN_FS */
455 if (doingdirectory == 0 && (ioflag & IO_SYNC))
456 (void)buf_bwrite(bp);
457 else if (xfersize + blkoffset == fs->fs_bsize) {
458 buf_markaged(bp);
459 buf_bdwrite(bp);
460 }
461 else
462 buf_bdwrite(bp);
463 if (error || xfersize == 0)
464 break;
465 ip->i_flag |= IN_CHANGE | IN_UPDATE;
466 }
467 }
468 /*
469 * If we successfully wrote any data, and we are not the superuser
470 * we clear the setuid and setgid bits as a precaution against
471 * tampering.
472 */
473 if (resid > uio_resid(uio) && cred && suser(cred, NULL))
474 ip->i_mode &= ~(ISUID | ISGID);
475 if (resid > uio_resid(uio))
476 VN_KNOTE(vp, NOTE_WRITE | (file_extended ? NOTE_EXTEND : 0));
477 if (error) {
478 if (ioflag & IO_UNIT) {
479 (void)ffs_truncate_internal(vp, osize, ioflag & IO_SYNC, cred);
480 // LP64todo - fix this
481 uio->uio_offset -= resid - uio_resid(uio);
482 uio_setresid(uio, resid);
483 }
484 } else if (resid > uio_resid(uio) && (ioflag & IO_SYNC)) {
485 struct timeval tv;
486
487 microtime(&tv);
488 error = ffs_update(vp, &tv, &tv, 1);
489 }
490 if (partialwrite) {
491 oldcount = uio_resid(uio);
492 uio_setresid(uio, oldcount + clippedsize);
493 }
494 return (error);
495 }
496
497 /*
498 * Vnode op for pagein.
499 * Similar to ffs_read()
500 */
501 /* ARGSUSED */
502 ffs_pagein(ap)
503 struct vnop_pagein_args /* {
504 struct vnode *a_vp,
505 upl_t a_pl,
506 vm_offset_t a_pl_offset,
507 off_t a_f_offset,
508 size_t a_size,
509 int a_flags
510 vfs_context_t a_context;
511 } */ *ap;
512 {
513 register struct vnode *vp = ap->a_vp;
514 upl_t pl = ap->a_pl;
515 size_t size= ap->a_size;
516 off_t f_offset = ap->a_f_offset;
517 vm_offset_t pl_offset = ap->a_pl_offset;
518 int flags = ap->a_flags;
519 register struct inode *ip;
520 int error;
521
522 ip = VTOI(vp);
523
524 #if DIAGNOSTIC
525 if (vp->v_type == VLNK) {
526 if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
527 panic("%s: short symlink", "ffs_pagein");
528 } else if (vp->v_type != VREG && vp->v_type != VDIR)
529 panic("%s: type %d", "ffs_pagein", vp->v_type);
530 #endif
531
532 error = cluster_pagein(vp, pl, pl_offset, f_offset, size, (off_t)ip->i_size, flags);
533
534 /* ip->i_flag |= IN_ACCESS; */
535 return (error);
536 }
537
538 /*
539 * Vnode op for pageout.
540 * Similar to ffs_write()
541 * make sure the buf is not in hash queue when you return
542 */
543 ffs_pageout(ap)
544 struct vnop_pageout_args /* {
545 struct vnode *a_vp,
546 upl_t a_pl,
547 vm_offset_t a_pl_offset,
548 off_t a_f_offset,
549 size_t a_size,
550 int a_flags
551 vfs_context_t a_context;
552 } */ *ap;
553 {
554 register struct vnode *vp = ap->a_vp;
555 upl_t pl = ap->a_pl;
556 size_t size= ap->a_size;
557 off_t f_offset = ap->a_f_offset;
558 vm_offset_t pl_offset = ap->a_pl_offset;
559 int flags = ap->a_flags;
560 register struct inode *ip;
561 register FS *fs;
562 int error ;
563 size_t xfer_size = 0;
564 int local_flags=0;
565 off_t local_offset;
566 int resid, blkoffset;
567 size_t xsize, lsize;
568 daddr_t lbn;
569 int save_error =0, save_size=0;
570 vm_offset_t lupl_offset;
571 int nocommit = flags & UPL_NOCOMMIT;
572 int devBlockSize = 0;
573 struct buf *bp;
574
575 ip = VTOI(vp);
576
577 if (vp->v_mount->mnt_flag & MNT_RDONLY) {
578 if (!nocommit)
579 ubc_upl_abort_range(pl, pl_offset, size,
580 UPL_ABORT_FREE_ON_EMPTY);
581 return (EROFS);
582 }
583 fs = ip->I_FS;
584
585 if (f_offset < 0 || f_offset >= ip->i_size) {
586 if (!nocommit)
587 ubc_upl_abort_range(pl, pl_offset, size,
588 UPL_ABORT_FREE_ON_EMPTY);
589 return (EINVAL);
590 }
591
592 /*
593 * once we enable multi-page pageouts we will
594 * need to make sure we abort any pages in the upl
595 * that we don't issue an I/O for
596 */
597 if (f_offset + size > ip->i_size)
598 xfer_size = ip->i_size - f_offset;
599 else
600 xfer_size = size;
601
602 devBlockSize = vfs_devblocksize(vnode_mount(vp));
603
604 if (xfer_size & (PAGE_SIZE - 1)) {
605 /* if not a multiple of page size
606 * then round up to be a multiple
607 * the physical disk block size
608 */
609 xfer_size = (xfer_size + (devBlockSize - 1)) & ~(devBlockSize - 1);
610 }
611
612 /*
613 * once the block allocation is moved to ufs_blockmap
614 * we can remove all the size and offset checks above
615 * cluster_pageout does all of this now
616 * we need to continue to do it here so as not to
617 * allocate blocks that aren't going to be used because
618 * of a bogus parameter being passed in
619 */
620 local_flags = 0;
621 resid = xfer_size;
622 local_offset = f_offset;
623 for (error = 0; resid > 0;) {
624 lbn = lblkno(fs, local_offset);
625 blkoffset = blkoff(fs, local_offset);
626 xsize = fs->fs_bsize - blkoffset;
627 if (resid < xsize)
628 xsize = resid;
629 /* Allocate block without reading into a buf */
630 error = ffs_blkalloc(ip,
631 lbn, blkoffset + xsize, vfs_context_ucred(ap->a_context),
632 local_flags);
633 if (error)
634 break;
635 resid -= xsize;
636 local_offset += (off_t)xsize;
637 }
638
639 if (error) {
640 save_size = resid;
641 save_error = error;
642 xfer_size -= save_size;
643 }
644
645
646 error = cluster_pageout(vp, pl, pl_offset, f_offset, round_page_32(xfer_size), ip->i_size, flags);
647
648 if(save_error) {
649 lupl_offset = size - save_size;
650 resid = round_page_32(save_size);
651 if (!nocommit)
652 ubc_upl_abort_range(pl, lupl_offset, resid,
653 UPL_ABORT_FREE_ON_EMPTY);
654 if(!error)
655 error= save_error;
656 }
657 return (error);
658 }