/*
 * Source: apple/xnu (release xnu-344.21.73), bsd/ufs/ffs/ffs_balloc.c
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
26 /*
27 * Copyright (c) 1982, 1986, 1989, 1993
28 * The Regents of the University of California. All rights reserved.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
32 * are met:
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
38 * 3. All advertising materials mentioning features or use of this software
39 * must display the following acknowledgement:
40 * This product includes software developed by the University of
41 * California, Berkeley and its contributors.
42 * 4. Neither the name of the University nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE.
57 *
58 * @(#)ffs_balloc.c 8.8 (Berkeley) 6/16/95
59 */
60
61 #include <rev_endian_fs.h>
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/buf.h>
65 #include <sys/proc.h>
66 #include <sys/file.h>
67 #include <sys/vnode.h>
68 #include <sys/ubc.h>
69 #include <sys/quota.h>
70
71 #if REV_ENDIAN_FS
72 #include <sys/mount.h>
73 #endif /* REV_ENDIAN_FS */
74
75 #include <sys/vm.h>
76
77 #include <ufs/ufs/quota.h>
78 #include <ufs/ufs/inode.h>
79 #include <ufs/ufs/ufs_extern.h>
80
81 #include <ufs/ffs/fs.h>
82 #include <ufs/ffs/ffs_extern.h>
83
84 #if REV_ENDIAN_FS
85 #include <ufs/ufs/ufs_byte_order.h>
86 #include <architecture/byte_order.h>
87 #endif /* REV_ENDIAN_FS */
88
89 /*
90 * Balloc defines the structure of file system storage
91 * by allocating the physical blocks on a device given
92 * the inode and the logical block number in a file.
93 */
94 ffs_balloc(ip, lbn, size, cred, bpp, flags, blk_alloc)
95 register struct inode *ip;
96 register ufs_daddr_t lbn;
97 int size;
98 struct ucred *cred;
99 struct buf **bpp;
100 int flags;
101 int * blk_alloc;
102 {
103 register struct fs *fs;
104 register ufs_daddr_t nb;
105 struct buf *bp, *nbp;
106 struct vnode *vp = ITOV(ip);
107 struct indir indirs[NIADDR + 2];
108 ufs_daddr_t newb, *bap, pref;
109 int deallocated, osize, nsize, num, i, error;
110 ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
111 int devBlockSize=0;
112 int alloc_buffer = 1;
113 #if REV_ENDIAN_FS
114 struct mount *mp=vp->v_mount;
115 int rev_endian=(mp->mnt_flag & MNT_REVEND);
116 #endif /* REV_ENDIAN_FS */
117
118 *bpp = NULL;
119 if (lbn < 0)
120 return (EFBIG);
121 fs = ip->i_fs;
122 if (flags & B_NOBUFF)
123 alloc_buffer = 0;
124
125 if (blk_alloc)
126 *blk_alloc = 0;
127
128 /*
129 * If the next write will extend the file into a new block,
130 * and the file is currently composed of a fragment
131 * this fragment has to be extended to be a full block.
132 */
133 nb = lblkno(fs, ip->i_size);
134 if (nb < NDADDR && nb < lbn) {
135 /* the filesize prior to this write can fit in direct
136 * blocks (ie. fragmentaion is possibly done)
137 * we are now extending the file write beyond
138 * the block which has end of file prior to this write
139 */
140 osize = blksize(fs, ip, nb);
141 /* osize gives disk allocated size in the last block. It is
142 * either in fragments or a file system block size */
143 if (osize < fs->fs_bsize && osize > 0) {
144 /* few fragments are already allocated,since the
145 * current extends beyond this block
146 * allocate the complete block as fragments are only
147 * in last block
148 */
149 error = ffs_realloccg(ip, nb,
150 ffs_blkpref(ip, nb, (int)nb, &ip->i_db[0]),
151 osize, (int)fs->fs_bsize, cred, &bp);
152 if (error)
153 return (error);
154 /* adjust the innode size we just grew */
155 /* it is in nb+1 as nb starts from 0 */
156 ip->i_size = (nb + 1) * fs->fs_bsize;
157 if (UBCISVALID(vp))
158 ubc_setsize(vp, (off_t)ip->i_size); /* XXX check error */
159 ip->i_db[nb] = dbtofsb(fs, bp->b_blkno);
160 ip->i_flag |= IN_CHANGE | IN_UPDATE;
161 if ((flags & B_SYNC) || (!alloc_buffer)) {
162 if (!alloc_buffer)
163 SET(bp->b_flags, B_INVAL);
164 bwrite(bp);
165 } else
166 bawrite(bp);
167 /* note that bp is already released here */
168 }
169 }
170 /*
171 * The first NDADDR blocks are direct blocks
172 */
173 if (lbn < NDADDR) {
174 nb = ip->i_db[lbn];
175 if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
176 if (alloc_buffer) {
177 error = bread(vp, lbn, fs->fs_bsize, NOCRED, &bp);
178 if (error) {
179 brelse(bp);
180 return (error);
181 }
182 *bpp = bp;
183 }
184 return (0);
185 }
186 if (nb != 0) {
187 /*
188 * Consider need to reallocate a fragment.
189 */
190 osize = fragroundup(fs, blkoff(fs, ip->i_size));
191 nsize = fragroundup(fs, size);
192 if (nsize <= osize) {
193 if (alloc_buffer) {
194 error = bread(vp, lbn, osize, NOCRED, &bp);
195 if (error) {
196 brelse(bp);
197 return (error);
198 }
199 ip->i_flag |= IN_CHANGE | IN_UPDATE;
200 *bpp = bp;
201 return (0);
202 }
203 else {
204 ip->i_flag |= IN_CHANGE | IN_UPDATE;
205 return (0);
206 }
207 } else {
208 error = ffs_realloccg(ip, lbn,
209 ffs_blkpref(ip, lbn, (int)lbn,
210 &ip->i_db[0]), osize, nsize, cred, &bp);
211 if (error)
212 return (error);
213 ip->i_db[lbn] = dbtofsb(fs, bp->b_blkno);
214 ip->i_flag |= IN_CHANGE | IN_UPDATE;
215 if(!alloc_buffer) {
216 SET(bp->b_flags, B_INVAL);
217 bwrite(bp);
218 } else
219 *bpp = bp;
220 return (0);
221
222 }
223 } else {
224 if (ip->i_size < (lbn + 1) * fs->fs_bsize)
225 nsize = fragroundup(fs, size);
226 else
227 nsize = fs->fs_bsize;
228 error = ffs_alloc(ip, lbn,
229 ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
230 nsize, cred, &newb);
231 if (error)
232 return (error);
233 if (alloc_buffer) {
234 bp = getblk(vp, lbn, nsize, 0, 0, BLK_WRITE);
235 bp->b_blkno = fsbtodb(fs, newb);
236 if (flags & B_CLRBUF)
237 clrbuf(bp);
238 }
239 ip->i_db[lbn] = newb;
240 ip->i_flag |= IN_CHANGE | IN_UPDATE;
241 if (blk_alloc) {
242 *blk_alloc = nsize;
243 }
244 if (alloc_buffer)
245 *bpp = bp;
246 return (0);
247 }
248 }
249 /*
250 * Determine the number of levels of indirection.
251 */
252 pref = 0;
253 if (error = ufs_getlbns(vp, lbn, indirs, &num))
254 return(error);
255 #if DIAGNOSTIC
256 if (num < 1)
257 panic ("ffs_balloc: ufs_bmaparray returned indirect block\n");
258 #endif
259 /*
260 * Fetch the first indirect block allocating if necessary.
261 */
262 --num;
263 nb = ip->i_ib[indirs[0].in_off];
264 allocib = NULL;
265 allocblk = allociblk;
266 if (nb == 0) {
267 pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
268 if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
269 cred, &newb))
270 return (error);
271 nb = newb;
272 *allocblk++ = nb;
273 bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
274 bp->b_blkno = fsbtodb(fs, nb);
275 clrbuf(bp);
276 /*
277 * Write synchronously so that indirect blocks
278 * never point at garbage.
279 */
280 if (error = bwrite(bp))
281 goto fail;
282 allocib = &ip->i_ib[indirs[0].in_off];
283 *allocib = nb;
284 ip->i_flag |= IN_CHANGE | IN_UPDATE;
285 }
286 /*
287 * Fetch through the indirect blocks, allocating as necessary.
288 */
289 for (i = 1;;) {
290 error = meta_bread(vp,
291 indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
292 if (error) {
293 brelse(bp);
294 goto fail;
295 }
296 bap = (ufs_daddr_t *)bp->b_data;
297 #if REV_ENDIAN_FS
298 if (rev_endian)
299 nb = NXSwapLong(bap[indirs[i].in_off]);
300 else {
301 #endif /* REV_ENDIAN_FS */
302 nb = bap[indirs[i].in_off];
303 #if REV_ENDIAN_FS
304 }
305 #endif /* REV_ENDIAN_FS */
306 if (i == num)
307 break;
308 i += 1;
309 if (nb != 0) {
310 brelse(bp);
311 continue;
312 }
313 if (pref == 0)
314 pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
315 if (error =
316 ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
317 brelse(bp);
318 goto fail;
319 }
320 nb = newb;
321 *allocblk++ = nb;
322 nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
323 nbp->b_blkno = fsbtodb(fs, nb);
324 clrbuf(nbp);
325 /*
326 * Write synchronously so that indirect blocks
327 * never point at garbage.
328 */
329 if (error = bwrite(nbp)) {
330 brelse(bp);
331 goto fail;
332 }
333 #if REV_ENDIAN_FS
334 if (rev_endian)
335 bap[indirs[i - 1].in_off] = NXSwapLong(nb);
336 else {
337 #endif /* REV_ENDIAN_FS */
338 bap[indirs[i - 1].in_off] = nb;
339 #if REV_ENDIAN_FS
340 }
341 #endif /* REV_ENDIAN_FS */
342 /*
343 * If required, write synchronously, otherwise use
344 * delayed write.
345 */
346 if (flags & B_SYNC) {
347 bwrite(bp);
348 } else {
349 bdwrite(bp);
350 }
351 }
352 /*
353 * Get the data block, allocating if necessary.
354 */
355 if (nb == 0) {
356 pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
357 if (error = ffs_alloc(ip,
358 lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
359 brelse(bp);
360 goto fail;
361 }
362 nb = newb;
363 *allocblk++ = nb;
364 #if REV_ENDIAN_FS
365 if (rev_endian)
366 bap[indirs[i].in_off] = NXSwapLong(nb);
367 else {
368 #endif /* REV_ENDIAN_FS */
369 bap[indirs[i].in_off] = nb;
370 #if REV_ENDIAN_FS
371 }
372 #endif /* REV_ENDIAN_FS */
373 /*
374 * If required, write synchronously, otherwise use
375 * delayed write.
376 */
377 if ((flags & B_SYNC)) {
378 bwrite(bp);
379 } else {
380 bdwrite(bp);
381 }
382 if(alloc_buffer ) {
383 nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, BLK_WRITE);
384 nbp->b_blkno = fsbtodb(fs, nb);
385 if (flags & B_CLRBUF)
386 clrbuf(nbp);
387 }
388 if (blk_alloc) {
389 *blk_alloc = fs->fs_bsize;
390 }
391 if(alloc_buffer)
392 *bpp = nbp;
393
394 return (0);
395 }
396 brelse(bp);
397 if (alloc_buffer) {
398 if (flags & B_CLRBUF) {
399 error = bread(vp, lbn, (int)fs->fs_bsize, NOCRED, &nbp);
400 if (error) {
401 brelse(nbp);
402 goto fail;
403 }
404 } else {
405 nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, BLK_WRITE);
406 nbp->b_blkno = fsbtodb(fs, nb);
407 }
408 *bpp = nbp;
409 }
410 return (0);
411 fail:
412 /*
413 * If we have failed part way through block allocation, we
414 * have to deallocate any indirect blocks that we have allocated.
415 */
416 for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
417 ffs_blkfree(ip, *blkp, fs->fs_bsize);
418 deallocated += fs->fs_bsize;
419 }
420 if (allocib != NULL)
421 *allocib = 0;
422 if (deallocated) {
423 VOP_DEVBLOCKSIZE(ip->i_devvp,&devBlockSize);
424
425 #if QUOTA
426 /*
427 * Restore user's disk quota because allocation failed.
428 */
429 (void) chkdq(ip, (int64_t)-deallocated, cred, FORCE);
430 #endif /* QUOTA */
431 ip->i_blocks -= btodb(deallocated, devBlockSize);
432 ip->i_flag |= IN_CHANGE | IN_UPDATE;
433 }
434 return (error);
435 }
436
437 /*
438 * ffs_blkalloc allocates a disk block for ffs_pageout(), as a consequence
439 * it does no breads (that could lead to deadblock as the page may be already
440 * marked busy as it is being paged out. Also important to note that we are not
441 * growing the file in pageouts. So ip->i_size cannot increase by this call
442 * due to the way UBC works.
443 * This code is derived from ffs_balloc and many cases of that are dealt
444 * in ffs_balloc are not applicable here
445 * Do not call with B_CLRBUF flags as this should only be called only
446 * from pageouts
447 */
448 ffs_blkalloc(ip, lbn, size, cred, flags)
449 register struct inode *ip;
450 ufs_daddr_t lbn;
451 int size;
452 struct ucred *cred;
453 int flags;
454 {
455 register struct fs *fs;
456 register ufs_daddr_t nb;
457 struct buf *bp, *nbp;
458 struct vnode *vp = ITOV(ip);
459 struct indir indirs[NIADDR + 2];
460 ufs_daddr_t newb, *bap, pref;
461 int deallocated, osize, nsize, num, i, error;
462 ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
463 int devBlockSize=0;
464 #if REV_ENDIAN_FS
465 struct mount *mp=vp->v_mount;
466 int rev_endian=(mp->mnt_flag & MNT_REVEND);
467 #endif /* REV_ENDIAN_FS */
468
469 fs = ip->i_fs;
470
471 if(size > fs->fs_bsize)
472 panic("ffs_blkalloc: too large for allocation\n");
473
474 /*
475 * If the next write will extend the file into a new block,
476 * and the file is currently composed of a fragment
477 * this fragment has to be extended to be a full block.
478 */
479 nb = lblkno(fs, ip->i_size);
480 if (nb < NDADDR && nb < lbn) {
481 panic("ffs_blkalloc():cannot extend file: i_size %d, lbn %d\n", ip->i_size, lbn);
482 }
483 /*
484 * The first NDADDR blocks are direct blocks
485 */
486 if (lbn < NDADDR) {
487 nb = ip->i_db[lbn];
488 if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
489 /* TBD: trivial case; the block is already allocated */
490 return (0);
491 }
492 if (nb != 0) {
493 /*
494 * Consider need to reallocate a fragment.
495 */
496 osize = fragroundup(fs, blkoff(fs, ip->i_size));
497 nsize = fragroundup(fs, size);
498 if (nsize > osize) {
499 panic("ffs_allocblk: trying to extend
500 a fragment \n");
501 }
502 return(0);
503 } else {
504 if (ip->i_size < (lbn + 1) * fs->fs_bsize)
505 nsize = fragroundup(fs, size);
506 else
507 nsize = fs->fs_bsize;
508 error = ffs_alloc(ip, lbn,
509 ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
510 nsize, cred, &newb);
511 if (error)
512 return (error);
513 ip->i_db[lbn] = newb;
514 ip->i_flag |= IN_CHANGE | IN_UPDATE;
515 return (0);
516 }
517 }
518 /*
519 * Determine the number of levels of indirection.
520 */
521 pref = 0;
522 if (error = ufs_getlbns(vp, lbn, indirs, &num))
523 return(error);
524
525 if(num == 0) {
526 panic("ffs_blkalloc: file with direct blocks only\n");
527 }
528
529 /*
530 * Fetch the first indirect block allocating if necessary.
531 */
532 --num;
533 nb = ip->i_ib[indirs[0].in_off];
534 allocib = NULL;
535 allocblk = allociblk;
536 if (nb == 0) {
537 pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
538 if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
539 cred, &newb))
540 return (error);
541 nb = newb;
542 *allocblk++ = nb;
543 bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
544 bp->b_blkno = fsbtodb(fs, nb);
545 clrbuf(bp);
546 /*
547 * Write synchronously so that indirect blocks
548 * never point at garbage.
549 */
550 if (error = bwrite(bp))
551 goto fail;
552 allocib = &ip->i_ib[indirs[0].in_off];
553 *allocib = nb;
554 ip->i_flag |= IN_CHANGE | IN_UPDATE;
555 }
556 /*
557 * Fetch through the indirect blocks, allocating as necessary.
558 */
559 for (i = 1;;) {
560 error = meta_bread(vp,
561 indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
562 if (error) {
563 brelse(bp);
564 goto fail;
565 }
566 bap = (ufs_daddr_t *)bp->b_data;
567 #if REV_ENDIAN_FS
568 if (rev_endian)
569 nb = NXSwapLong(bap[indirs[i].in_off]);
570 else {
571 #endif /* REV_ENDIAN_FS */
572 nb = bap[indirs[i].in_off];
573 #if REV_ENDIAN_FS
574 }
575 #endif /* REV_ENDIAN_FS */
576 if (i == num)
577 break;
578 i += 1;
579 if (nb != 0) {
580 brelse(bp);
581 continue;
582 }
583 if (pref == 0)
584 pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
585 if (error =
586 ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
587 brelse(bp);
588 goto fail;
589 }
590 nb = newb;
591 *allocblk++ = nb;
592 nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
593 nbp->b_blkno = fsbtodb(fs, nb);
594 clrbuf(nbp);
595 /*
596 * Write synchronously so that indirect blocks
597 * never point at garbage.
598 */
599 if (error = bwrite(nbp)) {
600 brelse(bp);
601 goto fail;
602 }
603 #if REV_ENDIAN_FS
604 if (rev_endian)
605 bap[indirs[i - 1].in_off] = NXSwapLong(nb);
606 else {
607 #endif /* REV_ENDIAN_FS */
608 bap[indirs[i - 1].in_off] = nb;
609 #if REV_ENDIAN_FS
610 }
611 #endif /* REV_ENDIAN_FS */
612 /*
613 * If required, write synchronously, otherwise use
614 * delayed write.
615 */
616 if (flags & B_SYNC) {
617 bwrite(bp);
618 } else {
619 bdwrite(bp);
620 }
621 }
622 /*
623 * Get the data block, allocating if necessary.
624 */
625 if (nb == 0) {
626 pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
627 if (error = ffs_alloc(ip,
628 lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
629 brelse(bp);
630 goto fail;
631 }
632 nb = newb;
633 *allocblk++ = nb;
634 #if REV_ENDIAN_FS
635 if (rev_endian)
636 bap[indirs[i].in_off] = NXSwapLong(nb);
637 else {
638 #endif /* REV_ENDIAN_FS */
639 bap[indirs[i].in_off] = nb;
640 #if REV_ENDIAN_FS
641 }
642 #endif /* REV_ENDIAN_FS */
643 /*
644 * If required, write synchronously, otherwise use
645 * delayed write.
646 */
647 if (flags & B_SYNC) {
648 bwrite(bp);
649 } else {
650 bdwrite(bp);
651 }
652 return (0);
653 }
654 brelse(bp);
655 return (0);
656 fail:
657 /*
658 * If we have failed part way through block allocation, we
659 * have to deallocate any indirect blocks that we have allocated.
660 */
661 for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
662 ffs_blkfree(ip, *blkp, fs->fs_bsize);
663 deallocated += fs->fs_bsize;
664 }
665 if (allocib != NULL)
666 *allocib = 0;
667 if (deallocated) {
668 VOP_DEVBLOCKSIZE(ip->i_devvp,&devBlockSize);
669
670 #if QUOTA
671 /*
672 * Restore user's disk quota because allocation failed.
673 */
674 (void) chkdq(ip, (int64_t)-deallocated, cred, FORCE);
675 #endif /* QUOTA */
676 ip->i_blocks -= btodb(deallocated, devBlockSize);
677 ip->i_flag |= IN_CHANGE | IN_UPDATE;
678 }
679 return (error);
680 }