/*
 * Source: apple/xnu (release xnu-517.12.7), bsd/ufs/ffs/ffs_balloc.c
 * (retrieved via the git.saurik.com apple/xnu.git mirror)
 */
1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
23 /*
24 * Copyright (c) 1982, 1986, 1989, 1993
25 * The Regents of the University of California. All rights reserved.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 * 3. All advertising materials mentioning features or use of this software
36 * must display the following acknowledgement:
37 * This product includes software developed by the University of
38 * California, Berkeley and its contributors.
39 * 4. Neither the name of the University nor the names of its contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 *
55 * @(#)ffs_balloc.c 8.8 (Berkeley) 6/16/95
56 */
57
58 #include <rev_endian_fs.h>
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/buf.h>
62 #include <sys/proc.h>
63 #include <sys/file.h>
64 #include <sys/vnode.h>
65 #include <sys/ubc.h>
66 #include <sys/quota.h>
67
68 #if REV_ENDIAN_FS
69 #include <sys/mount.h>
70 #endif /* REV_ENDIAN_FS */
71
72 #include <sys/vm.h>
73
74 #include <ufs/ufs/quota.h>
75 #include <ufs/ufs/inode.h>
76 #include <ufs/ufs/ufs_extern.h>
77
78 #include <ufs/ffs/fs.h>
79 #include <ufs/ffs/ffs_extern.h>
80
81 #if REV_ENDIAN_FS
82 #include <ufs/ufs/ufs_byte_order.h>
83 #include <architecture/byte_order.h>
84 #endif /* REV_ENDIAN_FS */
85
86 /*
87 * Balloc defines the structure of file system storage
88 * by allocating the physical blocks on a device given
89 * the inode and the logical block number in a file.
90 */
/*
 * ffs_balloc(ip, lbn, size, cred, bpp, flags, blk_alloc)
 *
 *   ip        inode of the file being written
 *   lbn       logical (file-relative) block number to back with storage
 *   size      bytes needed in that block (used to size fragments)
 *   cred      credentials charged for the allocation (quota accounting)
 *   bpp       out: buffer covering the block (left NULL when B_NOBUFF)
 *   flags     B_SYNC   -> write updated metadata/fragment synchronously
 *             B_NOBUFF -> allocate only; do not return a buffer
 *             B_CLRBUF -> zero (or read) the returned data buffer
 *   blk_alloc out, may be NULL: set to the number of bytes newly
 *             allocated by this call (0 if the block already existed)
 *
 * Returns 0 on success or an errno (EFBIG for a negative lbn, otherwise
 * whatever ffs_alloc/ffs_realloccg/bread report).  If allocation fails
 * partway through an indirect chain, every block allocated by this call
 * is released again at the `fail:` label.
 */
91 ffs_balloc(ip, lbn, size, cred, bpp, flags, blk_alloc)
92 register struct inode *ip;
93 register ufs_daddr_t lbn;
94 int size;
95 struct ucred *cred;
96 struct buf **bpp;
97 int flags;
98 int * blk_alloc;
99 {
100 register struct fs *fs;
101 register ufs_daddr_t nb;
102 struct buf *bp, *nbp;
103 struct vnode *vp = ITOV(ip);
104 struct indir indirs[NIADDR + 2];
105 ufs_daddr_t newb, *bap, pref;
106 int deallocated, osize, nsize, num, i, error;
107 ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
108 int devBlockSize=0;
109 int alloc_buffer = 1;
110 #if REV_ENDIAN_FS
111 struct mount *mp=vp->v_mount;
112 int rev_endian=(mp->mnt_flag & MNT_REVEND);
113 #endif /* REV_ENDIAN_FS */
114
115 *bpp = NULL;
116 if (lbn < 0)
117 return (EFBIG);
118 fs = ip->i_fs;
/* B_NOBUFF: caller wants the on-disk allocation only, no buffer via bpp */
119 if (flags & B_NOBUFF)
120 alloc_buffer = 0;
121
122 if (blk_alloc)
123 *blk_alloc = 0;
124
125 /*
126 * If the next write will extend the file into a new block,
127 * and the file is currently composed of a fragment
128 * this fragment has to be extended to be a full block.
129 */
130 nb = lblkno(fs, ip->i_size);
131 if (nb < NDADDR && nb < lbn) {
132 /* the filesize prior to this write can fit in direct
133 * blocks (i.e. fragmentation is possibly done)
134 * we are now extending the file write beyond
135 * the block which has end of file prior to this write
136 */
137 osize = blksize(fs, ip, nb);
138 /* osize gives disk allocated size in the last block. It is
139 * either in fragments or a file system block size */
140 if (osize < fs->fs_bsize && osize > 0) {
141 /* few fragments are already allocated, since the
142 * current write extends beyond this block;
143 * allocate the complete block as fragments are only
144 * in last block
145 */
146 error = ffs_realloccg(ip, nb,
147 ffs_blkpref(ip, nb, (int)nb, &ip->i_db[0]),
148 osize, (int)fs->fs_bsize, cred, &bp);
149 if (error)
150 return (error);
151 /* adjust the inode size we just grew */
152 /* it is in nb+1 as nb starts from 0 */
153 ip->i_size = (nb + 1) * fs->fs_bsize;
154 if (UBCISVALID(vp))
155 ubc_setsize(vp, (off_t)ip->i_size); /* XXX check error */
156 ip->i_db[nb] = dbtofsb(fs, bp->b_blkno);
157 ip->i_flag |= IN_CHANGE | IN_UPDATE;
158 if ((flags & B_SYNC) || (!alloc_buffer)) {
/* no buffer wanted by the caller: drop it from the cache once written */
159 if (!alloc_buffer)
160 SET(bp->b_flags, B_NOCACHE);
161 bwrite(bp);
162 } else
163 bdwrite(bp);
164 /* note that bp is already released here */
165 }
166 }
167 /*
168 * The first NDADDR blocks are direct blocks
169 */
170 if (lbn < NDADDR) {
171 nb = ip->i_db[lbn];
/* block exists and the file already covers it fully: just read it */
172 if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
173 if (alloc_buffer) {
174 error = bread(vp, lbn, fs->fs_bsize, NOCRED, &bp);
175 if (error) {
176 brelse(bp);
177 return (error);
178 }
179 *bpp = bp;
180 }
181 return (0);
182 }
183 if (nb != 0) {
184 /*
185 * Consider need to reallocate a fragment.
186 */
187 osize = fragroundup(fs, blkoff(fs, ip->i_size));
188 nsize = fragroundup(fs, size);
189 if (nsize <= osize) {
/* the existing fragment is already large enough for this write */
190 if (alloc_buffer) {
191 error = bread(vp, lbn, osize, NOCRED, &bp);
192 if (error) {
193 brelse(bp);
194 return (error);
195 }
196 ip->i_flag |= IN_CHANGE | IN_UPDATE;
197 *bpp = bp;
198 return (0);
199 }
200 else {
201 ip->i_flag |= IN_CHANGE | IN_UPDATE;
202 return (0);
203 }
204 } else {
/* grow the fragment to nsize (ffs_realloccg may move it on disk) */
205 error = ffs_realloccg(ip, lbn,
206 ffs_blkpref(ip, lbn, (int)lbn,
207 &ip->i_db[0]), osize, nsize, cred, &bp);
208 if (error)
209 return (error);
210 ip->i_db[lbn] = dbtofsb(fs, bp->b_blkno);
211 ip->i_flag |= IN_CHANGE | IN_UPDATE;
212 if(!alloc_buffer) {
213 SET(bp->b_flags, B_NOCACHE);
214 if (flags & B_SYNC)
215 bwrite(bp);
216 else
217 bdwrite(bp);
218 } else
219 *bpp = bp;
220 return (0);
221
222 }
223 } else {
/* no block yet: a fragment suffices for the last, partially
 * filled block; otherwise allocate a full block */
224 if (ip->i_size < (lbn + 1) * fs->fs_bsize)
225 nsize = fragroundup(fs, size);
226 else
227 nsize = fs->fs_bsize;
228 error = ffs_alloc(ip, lbn,
229 ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
230 nsize, cred, &newb);
231 if (error)
232 return (error);
233 if (alloc_buffer) {
234 bp = getblk(vp, lbn, nsize, 0, 0, BLK_WRITE);
235 bp->b_blkno = fsbtodb(fs, newb);
236 if (flags & B_CLRBUF)
237 clrbuf(bp);
238 }
239 ip->i_db[lbn] = newb;
240 ip->i_flag |= IN_CHANGE | IN_UPDATE;
/* report the number of bytes newly allocated to the caller */
241 if (blk_alloc) {
242 *blk_alloc = nsize;
243 }
244 if (alloc_buffer)
245 *bpp = bp;
246 return (0);
247 }
248 }
249 /*
250 * Determine the number of levels of indirection.
251 */
252 pref = 0;
253 if (error = ufs_getlbns(vp, lbn, indirs, &num))
254 return(error);
255 #if DIAGNOSTIC
256 if (num < 1)
257 panic ("ffs_balloc: ufs_bmaparray returned indirect block");
258 #endif
259 /*
260 * Fetch the first indirect block allocating if necessary.
261 */
262 --num;
263 nb = ip->i_ib[indirs[0].in_off];
264 allocib = NULL;
265 allocblk = allociblk;
/* allociblk[] records every block allocated below so the fail:
 * path can free them all if a later level of the chain fails */
266 if (nb == 0) {
267 pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
268 if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
269 cred, &newb))
270 return (error);
271 nb = newb;
272 *allocblk++ = nb;
273 bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
274 bp->b_blkno = fsbtodb(fs, nb);
275 clrbuf(bp);
276 /*
277 * Write synchronously conditional on mount flags.
278 */
279 if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
280 error = 0;
281 bdwrite(bp);
282 } else if ((error = bwrite(bp)) != 0) {
283 goto fail;
284 }
285 allocib = &ip->i_ib[indirs[0].in_off];
286 *allocib = nb;
287 ip->i_flag |= IN_CHANGE | IN_UPDATE;
288 }
289 /*
290 * Fetch through the indirect blocks, allocating as necessary.
291 */
292 for (i = 1;;) {
293 error = meta_bread(vp,
294 indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
295 if (error) {
296 brelse(bp);
297 goto fail;
298 }
299 bap = (ufs_daddr_t *)bp->b_data;
300 #if REV_ENDIAN_FS
/* opposite-endian mount: on-disk indirect entries are byte-swapped */
301 if (rev_endian)
302 nb = NXSwapLong(bap[indirs[i].in_off]);
303 else {
304 #endif /* REV_ENDIAN_FS */
305 nb = bap[indirs[i].in_off];
306 #if REV_ENDIAN_FS
307 }
308 #endif /* REV_ENDIAN_FS */
309 if (i == num)
310 break;
311 i += 1;
312 if (nb != 0) {
313 brelse(bp);
314 continue;
315 }
316 if (pref == 0)
317 pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
318 if (error =
319 ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
320 brelse(bp);
321 goto fail;
322 }
323 nb = newb;
324 *allocblk++ = nb;
325 nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
326 nbp->b_blkno = fsbtodb(fs, nb);
327 clrbuf(nbp);
328 /*
329 * Write synchronously conditional on mount flags.
330 */
331 if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
332 error = 0;
333 bdwrite(nbp);
334 } else if (error = bwrite(nbp)) {
335 brelse(bp);
336 goto fail;
337 }
/* link the freshly allocated indirect block into its parent */
338 #if REV_ENDIAN_FS
339 if (rev_endian)
340 bap[indirs[i - 1].in_off] = NXSwapLong(nb);
341 else {
342 #endif /* REV_ENDIAN_FS */
343 bap[indirs[i - 1].in_off] = nb;
344 #if REV_ENDIAN_FS
345 }
346 #endif /* REV_ENDIAN_FS */
347 /*
348 * If required, write synchronously, otherwise use
349 * delayed write.
350 */
351 if (flags & B_SYNC) {
352 bwrite(bp);
353 } else {
354 bdwrite(bp);
355 }
356 }
357 /*
358 * Get the data block, allocating if necessary.
359 */
360 if (nb == 0) {
361 pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
362 if (error = ffs_alloc(ip,
363 lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
364 brelse(bp);
365 goto fail;
366 }
367 nb = newb;
368 *allocblk++ = nb;
369 #if REV_ENDIAN_FS
370 if (rev_endian)
371 bap[indirs[i].in_off] = NXSwapLong(nb);
372 else {
373 #endif /* REV_ENDIAN_FS */
374 bap[indirs[i].in_off] = nb;
375 #if REV_ENDIAN_FS
376 }
377 #endif /* REV_ENDIAN_FS */
378 /*
379 * If required, write synchronously, otherwise use
380 * delayed write.
381 */
382 if ((flags & B_SYNC)) {
383 bwrite(bp);
384 } else {
385 bdwrite(bp);
386 }
387 if(alloc_buffer ) {
/* brand-new block: no bread needed, its contents are all new */
388 nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, BLK_WRITE);
389 nbp->b_blkno = fsbtodb(fs, nb);
390 if (flags & B_CLRBUF)
391 clrbuf(nbp);
392 }
393 if (blk_alloc) {
394 *blk_alloc = fs->fs_bsize;
395 }
396 if(alloc_buffer)
397 *bpp = nbp;
398
399 return (0);
400 }
/* the data block already existed behind the indirect chain */
401 brelse(bp);
402 if (alloc_buffer) {
403 if (flags & B_CLRBUF) {
404 error = bread(vp, lbn, (int)fs->fs_bsize, NOCRED, &nbp);
405 if (error) {
406 brelse(nbp);
407 goto fail;
408 }
409 } else {
410 nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, BLK_WRITE);
411 nbp->b_blkno = fsbtodb(fs, nb);
412 }
413 *bpp = nbp;
414 }
415 return (0);
416 fail:
417 /*
418 * If we have failed part way through block allocation, we
419 * have to deallocate any indirect blocks that we have allocated.
420 */
421 for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
422 ffs_blkfree(ip, *blkp, fs->fs_bsize);
423 deallocated += fs->fs_bsize;
424 }
425 if (allocib != NULL)
426 *allocib = 0;
427 if (deallocated) {
428 VOP_DEVBLOCKSIZE(ip->i_devvp,&devBlockSize);
429
430 #if QUOTA
431 /*
432 * Restore user's disk quota because allocation failed.
433 */
434 (void) chkdq(ip, (int64_t)-deallocated, cred, FORCE);
435 #endif /* QUOTA */
436 ip->i_blocks -= btodb(deallocated, devBlockSize);
437 ip->i_flag |= IN_CHANGE | IN_UPDATE;
438 }
439 return (error);
440 }
441
442 /*
443 * ffs_blkalloc allocates a disk block for ffs_pageout(), as a consequence
444 * it does no breads (that could lead to deadlock as the page may be already
445 * marked busy as it is being paged out). Also important to note that we are not
446 * growing the file in pageouts. So ip->i_size cannot increase by this call
447 * due to the way UBC works.
448 * This code is derived from ffs_balloc and many cases that are dealt
449 * with in ffs_balloc are not applicable here.
450 * Do not call with B_CLRBUF flags as this should only be called
451 * from pageouts
452 */
/*
 * Parameters mirror ffs_balloc() but no data buffer is returned:
 *   ip    inode being paged out
 *   lbn   logical block number that must have backing store
 *   size  bytes needed (must be <= fs_bsize; panics otherwise)
 *   cred  credentials charged for the allocation
 *   flags B_SYNC selects synchronous writes of updated indirect blocks
 *
 * Returns 0 on success or an errno from ffs_alloc/meta_bread/bwrite.
 * A partially allocated indirect chain is undone at the `fail:` label.
 */
453 ffs_blkalloc(ip, lbn, size, cred, flags)
454 register struct inode *ip;
455 ufs_daddr_t lbn;
456 int size;
457 struct ucred *cred;
458 int flags;
459 {
460 register struct fs *fs;
461 register ufs_daddr_t nb;
462 struct buf *bp, *nbp;
463 struct vnode *vp = ITOV(ip);
464 struct indir indirs[NIADDR + 2];
465 ufs_daddr_t newb, *bap, pref;
466 int deallocated, osize, nsize, num, i, error;
467 ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
468 int devBlockSize=0;
469 #if REV_ENDIAN_FS
470 struct mount *mp=vp->v_mount;
471 int rev_endian=(mp->mnt_flag & MNT_REVEND);
472 #endif /* REV_ENDIAN_FS */
473
474 fs = ip->i_fs;
475
476 if(size > fs->fs_bsize)
477 panic("ffs_blkalloc: too large for allocation");
478
479 /*
480 * If the next write will extend the file into a new block,
481 * and the file is currently composed of a fragment
482 * this fragment has to be extended to be a full block.
483 */
484 nb = lblkno(fs, ip->i_size);
485 if (nb < NDADDR && nb < lbn) {
/* pageouts never grow the file, so reaching here indicates a bug.
 * NOTE(review): i_size is presumably a 64-bit quantity; the %d
 * format below would truncate it -- verify against struct inode */
486 panic("ffs_blkalloc():cannot extend file: i_size %d, lbn %d", ip->i_size, lbn);
487 }
488 /*
489 * The first NDADDR blocks are direct blocks
490 */
491 if (lbn < NDADDR) {
492 nb = ip->i_db[lbn];
493 if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
494 /* TBD: trivial case; the block is already allocated */
495 return (0);
496 }
497 if (nb != 0) {
498 /*
499 * Consider need to reallocate a fragment.
500 */
501 osize = fragroundup(fs, blkoff(fs, ip->i_size));
502 nsize = fragroundup(fs, size);
503 if (nsize > osize) {
/* growing a fragment would imply growing the file -- a bug here */
504 panic("ffs_allocblk: trying to extend a fragment");
505 }
506 return(0);
507 } else {
/* allocate a fragment for the last partial block, else a full block */
508 if (ip->i_size < (lbn + 1) * fs->fs_bsize)
509 nsize = fragroundup(fs, size);
510 else
511 nsize = fs->fs_bsize;
512 error = ffs_alloc(ip, lbn,
513 ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
514 nsize, cred, &newb);
515 if (error)
516 return (error);
517 ip->i_db[lbn] = newb;
518 ip->i_flag |= IN_CHANGE | IN_UPDATE;
519 return (0);
520 }
521 }
522 /*
523 * Determine the number of levels of indirection.
524 */
525 pref = 0;
526 if (error = ufs_getlbns(vp, lbn, indirs, &num))
527 return(error);
528
529 if(num == 0) {
530 panic("ffs_blkalloc: file with direct blocks only");
531 }
532
533 /*
534 * Fetch the first indirect block allocating if necessary.
535 */
536 --num;
537 nb = ip->i_ib[indirs[0].in_off];
538 allocib = NULL;
539 allocblk = allociblk;
/* allociblk[] remembers each block allocated below so the fail:
 * path can release them if a later level of the chain fails */
540 if (nb == 0) {
541 pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
542 if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
543 cred, &newb))
544 return (error);
545 nb = newb;
546 *allocblk++ = nb;
547 bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
548 bp->b_blkno = fsbtodb(fs, nb);
549 clrbuf(bp);
550 /*
551 * Write synchronously conditional on mount flags.
552 */
553 if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
554 error = 0;
555 bdwrite(bp);
556 } else if (error = bwrite(bp)) {
557 goto fail;
558 }
559 allocib = &ip->i_ib[indirs[0].in_off];
560 *allocib = nb;
561 ip->i_flag |= IN_CHANGE | IN_UPDATE;
562 }
563 /*
564 * Fetch through the indirect blocks, allocating as necessary.
565 */
566 for (i = 1;;) {
/* meta_bread of indirect blocks is done here; per the header
 * comment only data-page breads risk deadlocking against the
 * busy page being paged out */
567 error = meta_bread(vp,
568 indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
569 if (error) {
570 brelse(bp);
571 goto fail;
572 }
573 bap = (ufs_daddr_t *)bp->b_data;
574 #if REV_ENDIAN_FS
/* opposite-endian mount: on-disk indirect entries are byte-swapped */
575 if (rev_endian)
576 nb = NXSwapLong(bap[indirs[i].in_off]);
577 else {
578 #endif /* REV_ENDIAN_FS */
579 nb = bap[indirs[i].in_off];
580 #if REV_ENDIAN_FS
581 }
582 #endif /* REV_ENDIAN_FS */
583 if (i == num)
584 break;
585 i += 1;
586 if (nb != 0) {
587 brelse(bp);
588 continue;
589 }
590 if (pref == 0)
591 pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
592 if (error =
593 ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
594 brelse(bp);
595 goto fail;
596 }
597 nb = newb;
598 *allocblk++ = nb;
599 nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
600 nbp->b_blkno = fsbtodb(fs, nb);
601 clrbuf(nbp);
602 /*
603 * Write synchronously conditional on mount flags.
604 */
605 if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
606 error = 0;
607 bdwrite(nbp);
608 } else if (error = bwrite(nbp)) {
609 brelse(bp);
610 goto fail;
611 }
/* link the freshly allocated indirect block into its parent */
612 #if REV_ENDIAN_FS
613 if (rev_endian)
614 bap[indirs[i - 1].in_off] = NXSwapLong(nb);
615 else {
616 #endif /* REV_ENDIAN_FS */
617 bap[indirs[i - 1].in_off] = nb;
618 #if REV_ENDIAN_FS
619 }
620 #endif /* REV_ENDIAN_FS */
621 /*
622 * If required, write synchronously, otherwise use
623 * delayed write.
624 */
625 if (flags & B_SYNC) {
626 bwrite(bp);
627 } else {
628 bdwrite(bp);
629 }
630 }
631 /*
632 * Get the data block, allocating if necessary.
633 */
634 if (nb == 0) {
635 pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
636 if (error = ffs_alloc(ip,
637 lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
638 brelse(bp);
639 goto fail;
640 }
641 nb = newb;
642 *allocblk++ = nb;
643 #if REV_ENDIAN_FS
644 if (rev_endian)
645 bap[indirs[i].in_off] = NXSwapLong(nb);
646 else {
647 #endif /* REV_ENDIAN_FS */
648 bap[indirs[i].in_off] = nb;
649 #if REV_ENDIAN_FS
650 }
651 #endif /* REV_ENDIAN_FS */
652 /*
653 * If required, write synchronously, otherwise use
654 * delayed write.
655 */
656 if (flags & B_SYNC) {
657 bwrite(bp);
658 } else {
659 bdwrite(bp);
660 }
661 return (0);
662 }
/* the data block already existed; nothing to do (no buffer returned) */
663 brelse(bp);
664 return (0);
665 fail:
666 /*
667 * If we have failed part way through block allocation, we
668 * have to deallocate any indirect blocks that we have allocated.
669 */
670 for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
671 ffs_blkfree(ip, *blkp, fs->fs_bsize);
672 deallocated += fs->fs_bsize;
673 }
674 if (allocib != NULL)
675 *allocib = 0;
676 if (deallocated) {
677 VOP_DEVBLOCKSIZE(ip->i_devvp,&devBlockSize);
678
679 #if QUOTA
680 /*
681 * Restore user's disk quota because allocation failed.
682 */
683 (void) chkdq(ip, (int64_t)-deallocated, cred, FORCE);
684 #endif /* QUOTA */
685 ip->i_blocks -= btodb(deallocated, devBlockSize);
686 ip->i_flag |= IN_CHANGE | IN_UPDATE;
687 }
688 return (error);
689 }