/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_balloc.c	8.8 (Berkeley) 6/16/95
 */

#include <rev_endian_fs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/ubc.h>
#include <sys/quota.h>

#if REV_ENDIAN_FS
#include <sys/mount.h>
#endif /* REV_ENDIAN_FS */

#include <sys/vm.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#if REV_ENDIAN_FS
#include <ufs/ufs/ufs_byte_order.h>
#include <architecture/byte_order.h>
#endif /* REV_ENDIAN_FS */

/*
 * Balloc defines the structure of file system storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 */
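/*
 * Arguments:
 *	ip		in-core inode of the file being allocated into
 *	lbn		logical block number within the file
 *	size		number of bytes needed in that block
 *	cred		credentials charged for the allocation
 *	bpp		if a buffer is requested, *bpp returns it
 *	flags		B_SYNC, B_CLRBUF, B_NOBUFF modify buffer handling
 *	blk_alloc	if non-NULL, *blk_alloc returns the bytes newly allocated
 *
 * A caller in the write path might use it roughly as follows (sketch only,
 * not an actual call site; xfersize is a hypothetical byte count):
 *
 *	error = ffs_balloc(ip, lbn, xfersize, cred, &bp, flags, 0);
 */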
ffs_balloc(ip, lbn, size, cred, bpp, flags, blk_alloc)
	register struct inode *ip;
	register ufs_daddr_t lbn;
	int size;
	struct ucred *cred;
	struct buf **bpp;
	int flags;
	int * blk_alloc;
{
	register struct fs *fs;
	register ufs_daddr_t nb;
	struct buf *bp, *nbp;
	struct vnode *vp = ITOV(ip);
	struct indir indirs[NIADDR + 2];
	ufs_daddr_t newb, *bap, pref;
	int deallocated, osize, nsize, num, i, error;
	ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
	int devBlockSize = 0;
	int alloc_buffer = 1;
#if REV_ENDIAN_FS
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	*bpp = NULL;
	if (lbn < 0)
		return (EFBIG);
	fs = ip->i_fs;
	if (flags & B_NOBUFF)
		alloc_buffer = 0;

	if (blk_alloc)
		*blk_alloc = 0;

	/*
	 * If the next write will extend the file into a new block,
	 * and the file currently ends in a fragment, that fragment
	 * has to be extended to be a full block.
	 */
	nb = lblkno(fs, ip->i_size);
	if (nb < NDADDR && nb < lbn) {
		/* The file size prior to this write fits in the direct
		 * blocks (i.e. the last block may be a fragment) and we
		 * are now extending the file beyond the block that held
		 * end of file prior to this write.
		 */
		osize = blksize(fs, ip, nb);
		/* osize gives the disk-allocated size of the last block;
		 * it is either some number of fragments or a full file
		 * system block. */
		if (osize < fs->fs_bsize && osize > 0) {
			/* Some fragments are already allocated; since the
			 * current write extends beyond this block, allocate
			 * the complete block, as fragments may only exist
			 * in the last block.
			 */
			error = ffs_realloccg(ip, nb,
				ffs_blkpref(ip, nb, (int)nb, &ip->i_db[0]),
				osize, (int)fs->fs_bsize, cred, &bp);
			if (error)
				return (error);
			/* adjust the inode size we just grew */
			/* it is nb+1 as nb starts from 0 */
			ip->i_size = (nb + 1) * fs->fs_bsize;
			if (UBCISVALID(vp))
				ubc_setsize(vp, (off_t)ip->i_size); /* XXX check error */
			ip->i_db[nb] = dbtofsb(fs, bp->b_blkno);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if ((flags & B_SYNC) || (!alloc_buffer)) {
				if (!alloc_buffer)
					SET(bp->b_flags, B_NOCACHE);
				bwrite(bp);
			} else
				bdwrite(bp);
			/* note that bp is already released here */
		}
	}
	/*
	 * The first NDADDR blocks are direct blocks.
	 */
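	/*
	 * ip->i_db[] holds the NDADDR direct block pointers.  A non-zero
	 * entry whose block already lies entirely within i_size needs no
	 * new allocation; at most it is read back for the caller.
	 */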
	if (lbn < NDADDR) {
		nb = ip->i_db[lbn];
		if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
			if (alloc_buffer) {
				error = bread(vp, lbn, fs->fs_bsize, NOCRED, &bp);
				if (error) {
					brelse(bp);
					return (error);
				}
				*bpp = bp;
			}
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {
				if (alloc_buffer) {
					error = bread(vp, lbn, osize, NOCRED, &bp);
					if (error) {
						brelse(bp);
						return (error);
					}
					ip->i_flag |= IN_CHANGE | IN_UPDATE;
					*bpp = bp;
					return (0);
				}
				else {
					ip->i_flag |= IN_CHANGE | IN_UPDATE;
					return (0);
				}
			} else {
				error = ffs_realloccg(ip, lbn,
					ffs_blkpref(ip, lbn, (int)lbn,
						&ip->i_db[0]), osize, nsize, cred, &bp);
				if (error)
					return (error);
				ip->i_db[lbn] = dbtofsb(fs, bp->b_blkno);
				ip->i_flag |= IN_CHANGE | IN_UPDATE;
				if (!alloc_buffer) {
					SET(bp->b_flags, B_NOCACHE);
					if (flags & B_SYNC)
						bwrite(bp);
					else
						bdwrite(bp);
				} else
					*bpp = bp;
				return (0);
			}
		} else {
			if (ip->i_size < (lbn + 1) * fs->fs_bsize)
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
				ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
				nsize, cred, &newb);
			if (error)
				return (error);
			if (alloc_buffer) {
				bp = getblk(vp, lbn, nsize, 0, 0, BLK_WRITE);
				bp->b_blkno = fsbtodb(fs, newb);
				if (flags & B_CLRBUF)
					clrbuf(bp);
			}
			ip->i_db[lbn] = newb;
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (blk_alloc) {
				*blk_alloc = nsize;
			}
			if (alloc_buffer)
				*bpp = bp;
			return (0);
		}
	}
	/*
	 * Determine the number of levels of indirection.
	 */
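	/*
	 * ufs_getlbns() fills indirs[] with the chain of indirect blocks
	 * (logical block number and offset at each level) leading to lbn,
	 * and returns the number of levels in num.
	 */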
	pref = 0;
	if (error = ufs_getlbns(vp, lbn, indirs, &num))
		return (error);
#if DIAGNOSTIC
	if (num < 1)
		panic ("ffs_balloc: ufs_bmaparray returned indirect block");
#endif
	/*
	 * Fetch the first indirect block, allocating if necessary.
	 */
	--num;
	nb = ip->i_ib[indirs[0].in_off];
	allocib = NULL;
	allocblk = allociblk;
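	/*
	 * allociblk[] records every block allocated from here on so that
	 * the fail: path can free them if a later step fails; allocib
	 * remembers where the first new indirect pointer was stored so it
	 * can be cleared as well.
	 */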
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
		if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    cred, &newb))
			return (error);
		nb = newb;
		*allocblk++ = nb;
		bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);
		/*
		 * Write synchronously, unless the file system is mounted
		 * MNT_ASYNC, in which case use a delayed write.
		 */
		if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
			error = 0;
			bdwrite(bp);
		} else if ((error = bwrite(bp)) != 0) {
			goto fail;
		}
		allocib = &ip->i_ib[indirs[0].in_off];
		*allocib = nb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
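	/*
	 * Walk down the chain recorded in indirs[]: read each indirect
	 * block, and if the pointer to the next level is missing, allocate
	 * a fresh indirect block, zero it, write it out, and link it into
	 * its parent before continuing.
	 */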
	for (i = 1;;) {
		error = meta_bread(vp,
		    indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			goto fail;
		}
		bap = (ufs_daddr_t *)bp->b_data;
#if REV_ENDIAN_FS
		if (rev_endian)
			nb = NXSwapLong(bap[indirs[i].in_off]);
		else {
#endif /* REV_ENDIAN_FS */
			nb = bap[indirs[i].in_off];
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		if (i == num)
			break;
		i += 1;
		if (nb != 0) {
			brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
		if (error =
		    ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously, unless the file system is mounted
		 * MNT_ASYNC, in which case use a delayed write.
		 */
		if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
			error = 0;
			bdwrite(nbp);
		} else if (error = bwrite(nbp)) {
			brelse(bp);
			goto fail;
		}
#if REV_ENDIAN_FS
		if (rev_endian)
			bap[indirs[i - 1].in_off] = NXSwapLong(nb);
		else {
#endif /* REV_ENDIAN_FS */
			bap[indirs[i - 1].in_off] = nb;
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			bdwrite(bp);
		}
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
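	/*
	 * At this point bp holds the last indirect block in the chain and
	 * nb is the data block pointer read from it; a zero pointer means
	 * the data block itself still has to be allocated and linked in.
	 */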
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
		if (error = ffs_alloc(ip,
		    lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
#if REV_ENDIAN_FS
		if (rev_endian)
			bap[indirs[i].in_off] = NXSwapLong(nb);
		else {
#endif /* REV_ENDIAN_FS */
			bap[indirs[i].in_off] = nb;
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			bdwrite(bp);
		}
		if (alloc_buffer) {
			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, BLK_WRITE);
			nbp->b_blkno = fsbtodb(fs, nb);
			if (flags & B_CLRBUF)
				clrbuf(nbp);
		}
		if (blk_alloc) {
			*blk_alloc = fs->fs_bsize;
		}
		if (alloc_buffer)
			*bpp = nbp;

		return (0);
	}
	brelse(bp);
	if (alloc_buffer) {
		if (flags & B_CLRBUF) {
			error = bread(vp, lbn, (int)fs->fs_bsize, NOCRED, &nbp);
			if (error) {
				brelse(nbp);
				goto fail;
			}
		} else {
			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, BLK_WRITE);
			nbp->b_blkno = fsbtodb(fs, nb);
		}
		*bpp = nbp;
	}
	return (0);
fail:
	/*
	 * If we have failed part way through block allocation, we
	 * have to deallocate any indirect blocks that we have allocated.
	 */
	for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
		ffs_blkfree(ip, *blkp, fs->fs_bsize);
		deallocated += fs->fs_bsize;
	}
	if (allocib != NULL)
		*allocib = 0;
	if (deallocated) {
		VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);

#if QUOTA
		/*
		 * Restore user's disk quota because allocation failed.
		 */
		(void) chkdq(ip, (int64_t)-deallocated, cred, FORCE);
#endif /* QUOTA */
		ip->i_blocks -= btodb(deallocated, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	return (error);
}

/*
 * ffs_blkalloc allocates a disk block for ffs_pageout(); as a consequence
 * it does no breads (those could deadlock, since the page may already be
 * marked busy while it is being paged out).  Also important to note is
 * that we are not growing the file in pageouts, so ip->i_size cannot
 * increase by this call due to the way UBC works.
 * This code is derived from ffs_balloc; many cases that ffs_balloc deals
 * with are not applicable here.
 * Do not call with the B_CLRBUF flag, as this should only be called
 * from pageouts.
 */
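/*
 * Unlike ffs_balloc, this routine never hands a buffer back to the caller;
 * it only ensures that the on-disk block (or fragment) backing lbn exists
 * and that the direct or indirect pointers leading to it are in place.
 */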
ffs_blkalloc(ip, lbn, size, cred, flags)
	register struct inode *ip;
	ufs_daddr_t lbn;
	int size;
	struct ucred *cred;
	int flags;
{
	register struct fs *fs;
	register ufs_daddr_t nb;
	struct buf *bp, *nbp;
	struct vnode *vp = ITOV(ip);
	struct indir indirs[NIADDR + 2];
	ufs_daddr_t newb, *bap, pref;
	int deallocated, osize, nsize, num, i, error;
	ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
	int devBlockSize = 0;
#if REV_ENDIAN_FS
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;

	if (size > fs->fs_bsize)
		panic("ffs_blkalloc: too large for allocation");

	/*
	 * If the next write will extend the file into a new block,
	 * and the file currently ends in a fragment, that fragment
	 * has to be extended to be a full block.
	 */
	nb = lblkno(fs, ip->i_size);
	if (nb < NDADDR && nb < lbn) {
		panic("ffs_blkalloc(): cannot extend file: i_size %d, lbn %d", ip->i_size, lbn);
	}
	/*
	 * The first NDADDR blocks are direct blocks.
	 */
	if (lbn < NDADDR) {
		nb = ip->i_db[lbn];
		if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
			/* TBD: trivial case; the block is already allocated */
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_size));
			nsize = fragroundup(fs, size);
			if (nsize > osize) {
				panic("ffs_allocblk: trying to extend a fragment");
			}
			return (0);
		} else {
			if (ip->i_size < (lbn + 1) * fs->fs_bsize)
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
				ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
				nsize, cred, &newb);
			if (error)
				return (error);
			ip->i_db[lbn] = newb;
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (0);
		}
	}
	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if (error = ufs_getlbns(vp, lbn, indirs, &num))
		return (error);

	if (num == 0) {
		panic("ffs_blkalloc: file with direct blocks only");
	}

	/*
	 * Fetch the first indirect block, allocating if necessary.
	 */
	--num;
	nb = ip->i_ib[indirs[0].in_off];
	allocib = NULL;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
		if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    cred, &newb))
			return (error);
		nb = newb;
		*allocblk++ = nb;
		bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);
		/*
		 * Write synchronously, unless the file system is mounted
		 * MNT_ASYNC, in which case use a delayed write.
		 */
		if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
			error = 0;
			bdwrite(bp);
		} else if (error = bwrite(bp)) {
			goto fail;
		}
		allocib = &ip->i_ib[indirs[0].in_off];
		*allocib = nb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
	for (i = 1;;) {
		error = meta_bread(vp,
		    indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			goto fail;
		}
		bap = (ufs_daddr_t *)bp->b_data;
#if REV_ENDIAN_FS
		if (rev_endian)
			nb = NXSwapLong(bap[indirs[i].in_off]);
		else {
#endif /* REV_ENDIAN_FS */
			nb = bap[indirs[i].in_off];
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		if (i == num)
			break;
		i += 1;
		if (nb != 0) {
			brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
		if (error =
		    ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously, unless the file system is mounted
		 * MNT_ASYNC, in which case use a delayed write.
		 */
		if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
			error = 0;
			bdwrite(nbp);
		} else if (error = bwrite(nbp)) {
			brelse(bp);
			goto fail;
		}
#if REV_ENDIAN_FS
		if (rev_endian)
			bap[indirs[i - 1].in_off] = NXSwapLong(nb);
		else {
#endif /* REV_ENDIAN_FS */
			bap[indirs[i - 1].in_off] = nb;
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			bdwrite(bp);
		}
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
		if (error = ffs_alloc(ip,
		    lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
#if REV_ENDIAN_FS
		if (rev_endian)
			bap[indirs[i].in_off] = NXSwapLong(nb);
		else {
#endif /* REV_ENDIAN_FS */
			bap[indirs[i].in_off] = nb;
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			bdwrite(bp);
		}
		return (0);
	}
	brelse(bp);
	return (0);
fail:
	/*
	 * If we have failed part way through block allocation, we
	 * have to deallocate any indirect blocks that we have allocated.
	 */
	for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
		ffs_blkfree(ip, *blkp, fs->fs_bsize);
		deallocated += fs->fs_bsize;
	}
	if (allocib != NULL)
		*allocib = 0;
	if (deallocated) {
		VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);

#if QUOTA
		/*
		 * Restore user's disk quota because allocation failed.
		 */
		(void) chkdq(ip, (int64_t)-deallocated, cred, FORCE);
#endif /* QUOTA */
		ip->i_blocks -= btodb(deallocated, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	return (error);
}