]> git.saurik.com Git - apple/xnu.git/blame_incremental - bsd/ufs/ffs/ffs_balloc.c
xnu-792.6.61.tar.gz
[apple/xnu.git] / bsd / ufs / ffs / ffs_balloc.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
23/*
24 * Copyright (c) 1982, 1986, 1989, 1993
25 * The Regents of the University of California. All rights reserved.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 * 3. All advertising materials mentioning features or use of this software
36 * must display the following acknowledgement:
37 * This product includes software developed by the University of
38 * California, Berkeley and its contributors.
39 * 4. Neither the name of the University nor the names of its contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 *
55 * @(#)ffs_balloc.c 8.8 (Berkeley) 6/16/95
56 */
57
58#include <rev_endian_fs.h>
59#include <sys/param.h>
60#include <sys/systm.h>
61#include <sys/buf_internal.h>
62#include <sys/proc.h>
63#include <sys/kauth.h>
64#include <sys/file.h>
65#include <sys/vnode_internal.h>
66#include <sys/ubc.h>
67#include <sys/quota.h>
68
69#if REV_ENDIAN_FS
70#include <sys/mount_internal.h>
71#endif /* REV_ENDIAN_FS */
72
73#include <sys/vm.h>
74
75#include <ufs/ufs/quota.h>
76#include <ufs/ufs/inode.h>
77#include <ufs/ufs/ufs_extern.h>
78
79#include <ufs/ffs/fs.h>
80#include <ufs/ffs/ffs_extern.h>
81
82#if REV_ENDIAN_FS
83#include <ufs/ufs/ufs_byte_order.h>
84#include <architecture/byte_order.h>
85#endif /* REV_ENDIAN_FS */
86
87/*
88 * Balloc defines the structure of file system storage
89 * by allocating the physical blocks on a device given
90 * the inode and the logical block number in a file.
91 */
92ffs_balloc(
93 register struct inode *ip,
94 register ufs_daddr_t lbn,
95 int size,
96 kauth_cred_t cred,
97 struct buf **bpp,
98 int flags,
99 int * blk_alloc)
100{
101 register struct fs *fs;
102 register ufs_daddr_t nb;
103 struct buf *bp, *nbp;
104 struct vnode *vp = ITOV(ip);
105 struct indir indirs[NIADDR + 2];
106 ufs_daddr_t newb, *bap, pref;
107 int deallocated, osize, nsize, num, i, error;
108 ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
109 int devBlockSize=0;
110 int alloc_buffer = 1;
111 struct mount *mp=vp->v_mount;
112#if REV_ENDIAN_FS
113 int rev_endian=(mp->mnt_flag & MNT_REVEND);
114#endif /* REV_ENDIAN_FS */
115
116 *bpp = NULL;
117 if (lbn < 0)
118 return (EFBIG);
119 fs = ip->i_fs;
120 if (flags & B_NOBUFF)
121 alloc_buffer = 0;
122
123 if (blk_alloc)
124 *blk_alloc = 0;
125
126 /*
127 * If the next write will extend the file into a new block,
128 * and the file is currently composed of a fragment
129 * this fragment has to be extended to be a full block.
130 */
131 nb = lblkno(fs, ip->i_size);
132 if (nb < NDADDR && nb < lbn) {
133 /* the filesize prior to this write can fit in direct
134 * blocks (ie. fragmentaion is possibly done)
135 * we are now extending the file write beyond
136 * the block which has end of file prior to this write
137 */
138 osize = blksize(fs, ip, nb);
139 /* osize gives disk allocated size in the last block. It is
140 * either in fragments or a file system block size */
141 if (osize < fs->fs_bsize && osize > 0) {
142 /* few fragments are already allocated,since the
143 * current extends beyond this block
144 * allocate the complete block as fragments are only
145 * in last block
146 */
147 error = ffs_realloccg(ip, nb,
148 ffs_blkpref(ip, nb, (int)nb, &ip->i_db[0]),
149 osize, (int)fs->fs_bsize, cred, &bp);
150 if (error)
151 return (error);
152 /* adjust the inode size we just grew */
153 /* it is in nb+1 as nb starts from 0 */
154 ip->i_size = (nb + 1) * fs->fs_bsize;
155 ubc_setsize(vp, (off_t)ip->i_size);
156
157 ip->i_db[nb] = dbtofsb(fs, (ufs_daddr_t)buf_blkno(bp));
158 ip->i_flag |= IN_CHANGE | IN_UPDATE;
159
160 if ((flags & B_SYNC) || (!alloc_buffer)) {
161 if (!alloc_buffer)
162 buf_setflags(bp, B_NOCACHE);
163 buf_bwrite(bp);
164 } else
165 buf_bdwrite(bp);
166 /* note that bp is already released here */
167 }
168 }
169 /*
170 * The first NDADDR blocks are direct blocks
171 */
172 if (lbn < NDADDR) {
173 nb = ip->i_db[lbn];
174 if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
175 if (alloc_buffer) {
176 error = (int)buf_bread(vp, (daddr64_t)((unsigned)lbn), fs->fs_bsize, NOCRED, &bp);
177 if (error) {
178 buf_brelse(bp);
179 return (error);
180 }
181 *bpp = bp;
182 }
183 return (0);
184 }
185 if (nb != 0) {
186 /*
187 * Consider need to reallocate a fragment.
188 */
189 osize = fragroundup(fs, blkoff(fs, ip->i_size));
190 nsize = fragroundup(fs, size);
191 if (nsize <= osize) {
192 if (alloc_buffer) {
193 error = (int)buf_bread(vp, (daddr64_t)((unsigned)lbn), osize, NOCRED, &bp);
194 if (error) {
195 buf_brelse(bp);
196 return (error);
197 }
198 ip->i_flag |= IN_CHANGE | IN_UPDATE;
199 *bpp = bp;
200 return (0);
201 }
202 else {
203 ip->i_flag |= IN_CHANGE | IN_UPDATE;
204 return (0);
205 }
206 } else {
207 error = ffs_realloccg(ip, lbn,
208 ffs_blkpref(ip, lbn, (int)lbn,
209 &ip->i_db[0]), osize, nsize, cred, &bp);
210 if (error)
211 return (error);
212 ip->i_db[lbn] = dbtofsb(fs, (ufs_daddr_t)buf_blkno(bp));
213 ip->i_flag |= IN_CHANGE | IN_UPDATE;
214
215 /* adjust the inode size we just grew */
216 ip->i_size = (lbn * fs->fs_bsize) + size;
217 ubc_setsize(vp, (off_t)ip->i_size);
218
219 if (!alloc_buffer) {
220 buf_setflags(bp, B_NOCACHE);
221 if (flags & B_SYNC)
222 buf_bwrite(bp);
223 else
224 buf_bdwrite(bp);
225 } else
226 *bpp = bp;
227 return (0);
228
229 }
230 } else {
231 if (ip->i_size < (lbn + 1) * fs->fs_bsize)
232 nsize = fragroundup(fs, size);
233 else
234 nsize = fs->fs_bsize;
235 error = ffs_alloc(ip, lbn,
236 ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
237 nsize, cred, &newb);
238 if (error)
239 return (error);
240 if (alloc_buffer) {
241 bp = buf_getblk(vp, (daddr64_t)((unsigned)lbn), nsize, 0, 0, BLK_WRITE);
242 buf_setblkno(bp, (daddr64_t)((unsigned)fsbtodb(fs, newb)));
243
244 if (flags & B_CLRBUF)
245 buf_clear(bp);
246 }
247 ip->i_db[lbn] = newb;
248 ip->i_flag |= IN_CHANGE | IN_UPDATE;
249 if (blk_alloc) {
250 *blk_alloc = nsize;
251 }
252 if (alloc_buffer)
253 *bpp = bp;
254 return (0);
255 }
256 }
257 /*
258 * Determine the number of levels of indirection.
259 */
260 pref = 0;
261 if (error = ufs_getlbns(vp, lbn, indirs, &num))
262 return(error);
263#if DIAGNOSTIC
264 if (num < 1)
265 panic ("ffs_balloc: ufs_bmaparray returned indirect block");
266#endif
267 /*
268 * Fetch the first indirect block allocating if necessary.
269 */
270 --num;
271 nb = ip->i_ib[indirs[0].in_off];
272 allocib = NULL;
273 allocblk = allociblk;
274 if (nb == 0) {
275 pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
276 if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
277 cred, &newb))
278 return (error);
279 nb = newb;
280 *allocblk++ = nb;
281 bp = buf_getblk(vp, (daddr64_t)((unsigned)(indirs[1].in_lbn)), fs->fs_bsize, 0, 0, BLK_META);
282 buf_setblkno(bp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));
283 buf_clear(bp);
284 /*
285 * Write synchronously conditional on mount flags.
286 */
287 if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
288 error = 0;
289 buf_bdwrite(bp);
290 } else if ((error = buf_bwrite(bp)) != 0) {
291 goto fail;
292 }
293 allocib = &ip->i_ib[indirs[0].in_off];
294 *allocib = nb;
295 ip->i_flag |= IN_CHANGE | IN_UPDATE;
296 }
297 /*
298 * Fetch through the indirect blocks, allocating as necessary.
299 */
300 for (i = 1;;) {
301 error = (int)buf_meta_bread(vp, (daddr64_t)((unsigned)(indirs[i].in_lbn)), (int)fs->fs_bsize, NOCRED, &bp);
302 if (error) {
303 buf_brelse(bp);
304 goto fail;
305 }
306 bap = (ufs_daddr_t *)buf_dataptr(bp);
307#if REV_ENDIAN_FS
308 if (rev_endian)
309 nb = NXSwapLong(bap[indirs[i].in_off]);
310 else {
311#endif /* REV_ENDIAN_FS */
312 nb = bap[indirs[i].in_off];
313#if REV_ENDIAN_FS
314 }
315#endif /* REV_ENDIAN_FS */
316 if (i == num)
317 break;
318 i += 1;
319 if (nb != 0) {
320 buf_brelse(bp);
321 continue;
322 }
323 if (pref == 0)
324 pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
325 if (error =
326 ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
327 buf_brelse(bp);
328 goto fail;
329 }
330 nb = newb;
331 *allocblk++ = nb;
332 nbp = buf_getblk(vp, (daddr64_t)((unsigned)(indirs[i].in_lbn)), fs->fs_bsize, 0, 0, BLK_META);
333 buf_setblkno(nbp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));
334 buf_clear(nbp);
335 /*
336 * Write synchronously conditional on mount flags.
337 */
338 if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
339 error = 0;
340 buf_bdwrite(nbp);
341 } else if (error = buf_bwrite(nbp)) {
342 buf_brelse(bp);
343 goto fail;
344 }
345#if REV_ENDIAN_FS
346 if (rev_endian)
347 bap[indirs[i - 1].in_off] = NXSwapLong(nb);
348 else {
349#endif /* REV_ENDIAN_FS */
350 bap[indirs[i - 1].in_off] = nb;
351#if REV_ENDIAN_FS
352 }
353#endif /* REV_ENDIAN_FS */
354 /*
355 * If required, write synchronously, otherwise use
356 * delayed write.
357 */
358 if (flags & B_SYNC) {
359 buf_bwrite(bp);
360 } else {
361 buf_bdwrite(bp);
362 }
363 }
364 /*
365 * Get the data block, allocating if necessary.
366 */
367 if (nb == 0) {
368 pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
369 if (error = ffs_alloc(ip,
370 lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
371 buf_brelse(bp);
372 goto fail;
373 }
374 nb = newb;
375 *allocblk++ = nb;
376#if REV_ENDIAN_FS
377 if (rev_endian)
378 bap[indirs[i].in_off] = NXSwapLong(nb);
379 else {
380#endif /* REV_ENDIAN_FS */
381 bap[indirs[i].in_off] = nb;
382#if REV_ENDIAN_FS
383 }
384#endif /* REV_ENDIAN_FS */
385 /*
386 * If required, write synchronously, otherwise use
387 * delayed write.
388 */
389 if ((flags & B_SYNC)) {
390 buf_bwrite(bp);
391 } else {
392 buf_bdwrite(bp);
393 }
394 if(alloc_buffer ) {
395 nbp = buf_getblk(vp, (daddr64_t)((unsigned)lbn), fs->fs_bsize, 0, 0, BLK_WRITE);
396 buf_setblkno(nbp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));
397
398 if (flags & B_CLRBUF)
399 buf_clear(nbp);
400 }
401 if (blk_alloc) {
402 *blk_alloc = fs->fs_bsize;
403 }
404 if(alloc_buffer)
405 *bpp = nbp;
406
407 return (0);
408 }
409 buf_brelse(bp);
410 if (alloc_buffer) {
411 if (flags & B_CLRBUF) {
412 error = (int)buf_bread(vp, (daddr64_t)((unsigned)lbn), (int)fs->fs_bsize, NOCRED, &nbp);
413 if (error) {
414 buf_brelse(nbp);
415 goto fail;
416 }
417 } else {
418 nbp = buf_getblk(vp, (daddr64_t)((unsigned)lbn), fs->fs_bsize, 0, 0, BLK_WRITE);
419 buf_setblkno(nbp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));
420 }
421 *bpp = nbp;
422 }
423 return (0);
424fail:
425 /*
426 * If we have failed part way through block allocation, we
427 * have to deallocate any indirect blocks that we have allocated.
428 */
429 for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
430 ffs_blkfree(ip, *blkp, fs->fs_bsize);
431 deallocated += fs->fs_bsize;
432 }
433 if (allocib != NULL)
434 *allocib = 0;
435 if (deallocated) {
436 devBlockSize = vfs_devblocksize(mp);
437#if QUOTA
438 /*
439 * Restore user's disk quota because allocation failed.
440 */
441 (void) chkdq(ip, (int64_t)-deallocated, cred, FORCE);
442#endif /* QUOTA */
443 ip->i_blocks -= btodb(deallocated, devBlockSize);
444 ip->i_flag |= IN_CHANGE | IN_UPDATE;
445 }
446 return (error);
447}
448
/*
 * ffs_blkalloc allocates a disk block for ffs_pageout().  As a consequence
 * it does no buf_breads (those could lead to deadlock, as the page may
 * already be marked busy while it is being paged out).  Also important to
 * note is that we are not growing the file in pageouts, so ip->i_size
 * cannot increase through this call due to the way UBC works.
 * This code is derived from ffs_balloc; many cases that ffs_balloc deals
 * with are not applicable here.
 * Do not call with the B_CLRBUF flag, as this should only be called
 * from pageouts.
 */
460ffs_blkalloc(
461 struct inode *ip,
462 ufs_daddr_t lbn,
463 int size,
464 kauth_cred_t cred,
465 int flags)
466{
467 register struct fs *fs;
468 register ufs_daddr_t nb;
469 struct buf *bp, *nbp;
470 struct vnode *vp = ITOV(ip);
471 struct indir indirs[NIADDR + 2];
472 ufs_daddr_t newb, *bap, pref;
473 int deallocated, osize, nsize, num, i, error;
474 ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
475 int devBlockSize=0;
476 struct mount *mp=vp->v_mount;
477#if REV_ENDIAN_FS
478 int rev_endian=(mp->mnt_flag & MNT_REVEND);
479#endif /* REV_ENDIAN_FS */
480
481 fs = ip->i_fs;
482
483 if(size > fs->fs_bsize)
484 panic("ffs_blkalloc: too large for allocation");
485
486 /*
487 * If the next write will extend the file into a new block,
488 * and the file is currently composed of a fragment
489 * this fragment has to be extended to be a full block.
490 */
491 nb = lblkno(fs, ip->i_size);
492 if (nb < NDADDR && nb < lbn) {
493 panic("ffs_blkalloc():cannot extend file: i_size %d, lbn %d", ip->i_size, lbn);
494 }
495 /*
496 * The first NDADDR blocks are direct blocks
497 */
498 if (lbn < NDADDR) {
499 nb = ip->i_db[lbn];
500 if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
501 /* TBD: trivial case; the block is already allocated */
502 return (0);
503 }
504 if (nb != 0) {
505 /*
506 * Consider need to reallocate a fragment.
507 */
508 osize = fragroundup(fs, blkoff(fs, ip->i_size));
509 nsize = fragroundup(fs, size);
510 if (nsize > osize) {
511 panic("ffs_allocblk: trying to extend a fragment");
512 }
513 return(0);
514 } else {
515 if (ip->i_size < (lbn + 1) * fs->fs_bsize)
516 nsize = fragroundup(fs, size);
517 else
518 nsize = fs->fs_bsize;
519 error = ffs_alloc(ip, lbn,
520 ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
521 nsize, cred, &newb);
522 if (error)
523 return (error);
524 ip->i_db[lbn] = newb;
525 ip->i_flag |= IN_CHANGE | IN_UPDATE;
526 return (0);
527 }
528 }
529 /*
530 * Determine the number of levels of indirection.
531 */
532 pref = 0;
533 if (error = ufs_getlbns(vp, lbn, indirs, &num))
534 return(error);
535
536 if(num == 0) {
537 panic("ffs_blkalloc: file with direct blocks only");
538 }
539
540 /*
541 * Fetch the first indirect block allocating if necessary.
542 */
543 --num;
544 nb = ip->i_ib[indirs[0].in_off];
545 allocib = NULL;
546 allocblk = allociblk;
547 if (nb == 0) {
548 pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
549 if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
550 cred, &newb))
551 return (error);
552 nb = newb;
553 *allocblk++ = nb;
554 bp = buf_getblk(vp, (daddr64_t)((unsigned)(indirs[1].in_lbn)), fs->fs_bsize, 0, 0, BLK_META);
555 buf_setblkno(bp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));
556 buf_clear(bp);
557 /*
558 * Write synchronously conditional on mount flags.
559 */
560 if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
561 error = 0;
562 buf_bdwrite(bp);
563 } else if (error = buf_bwrite(bp)) {
564 goto fail;
565 }
566 allocib = &ip->i_ib[indirs[0].in_off];
567 *allocib = nb;
568 ip->i_flag |= IN_CHANGE | IN_UPDATE;
569 }
570 /*
571 * Fetch through the indirect blocks, allocating as necessary.
572 */
573 for (i = 1;;) {
574 error = (int)buf_meta_bread(vp, (daddr64_t)((unsigned)(indirs[i].in_lbn)), (int)fs->fs_bsize, NOCRED, &bp);
575 if (error) {
576 buf_brelse(bp);
577 goto fail;
578 }
579 bap = (ufs_daddr_t *)buf_dataptr(bp);
580#if REV_ENDIAN_FS
581 if (rev_endian)
582 nb = NXSwapLong(bap[indirs[i].in_off]);
583 else {
584#endif /* REV_ENDIAN_FS */
585 nb = bap[indirs[i].in_off];
586#if REV_ENDIAN_FS
587 }
588#endif /* REV_ENDIAN_FS */
589 if (i == num)
590 break;
591 i += 1;
592 if (nb != 0) {
593 buf_brelse(bp);
594 continue;
595 }
596 if (pref == 0)
597 pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
598 if (error =
599 ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
600 buf_brelse(bp);
601 goto fail;
602 }
603 nb = newb;
604 *allocblk++ = nb;
605 nbp = buf_getblk(vp, (daddr64_t)((unsigned)(indirs[i].in_lbn)), fs->fs_bsize, 0, 0, BLK_META);
606 buf_setblkno(nbp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));
607 buf_clear(nbp);
608 /*
609 * Write synchronously conditional on mount flags.
610 */
611 if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
612 error = 0;
613 buf_bdwrite(nbp);
614 } else if (error = buf_bwrite(nbp)) {
615 buf_brelse(bp);
616 goto fail;
617 }
618#if REV_ENDIAN_FS
619 if (rev_endian)
620 bap[indirs[i - 1].in_off] = NXSwapLong(nb);
621 else {
622#endif /* REV_ENDIAN_FS */
623 bap[indirs[i - 1].in_off] = nb;
624#if REV_ENDIAN_FS
625 }
626#endif /* REV_ENDIAN_FS */
627 /*
628 * If required, write synchronously, otherwise use
629 * delayed write.
630 */
631 if (flags & B_SYNC) {
632 buf_bwrite(bp);
633 } else {
634 buf_bdwrite(bp);
635 }
636 }
637 /*
638 * Get the data block, allocating if necessary.
639 */
640 if (nb == 0) {
641 pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
642 if (error = ffs_alloc(ip,
643 lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
644 buf_brelse(bp);
645 goto fail;
646 }
647 nb = newb;
648 *allocblk++ = nb;
649#if REV_ENDIAN_FS
650 if (rev_endian)
651 bap[indirs[i].in_off] = NXSwapLong(nb);
652 else {
653#endif /* REV_ENDIAN_FS */
654 bap[indirs[i].in_off] = nb;
655#if REV_ENDIAN_FS
656 }
657#endif /* REV_ENDIAN_FS */
658 /*
659 * If required, write synchronously, otherwise use
660 * delayed write.
661 */
662 if (flags & B_SYNC) {
663 buf_bwrite(bp);
664 } else {
665 buf_bdwrite(bp);
666 }
667 return (0);
668 }
669 buf_brelse(bp);
670 return (0);
671fail:
672 /*
673 * If we have failed part way through block allocation, we
674 * have to deallocate any indirect blocks that we have allocated.
675 */
676 for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
677 ffs_blkfree(ip, *blkp, fs->fs_bsize);
678 deallocated += fs->fs_bsize;
679 }
680 if (allocib != NULL)
681 *allocib = 0;
682 if (deallocated) {
683 devBlockSize = vfs_devblocksize(mp);
684#if QUOTA
685 /*
686 * Restore user's disk quota because allocation failed.
687 */
688 (void) chkdq(ip, (int64_t)-deallocated, cred, FORCE);
689#endif /* QUOTA */
690 ip->i_blocks -= btodb(deallocated, devBlockSize);
691 ip->i_flag |= IN_CHANGE | IN_UPDATE;
692 }
693 return (error);
694}