/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)ffs_balloc.c        8.8 (Berkeley) 6/16/95
 */

#include <rev_endian_fs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/ubc.h>
#include <sys/quota.h>

#if REV_ENDIAN_FS
#include <sys/mount.h>
#endif /* REV_ENDIAN_FS */

#include <sys/vm.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#if REV_ENDIAN_FS
#include <ufs/ufs/ufs_byte_order.h>
#include <architecture/byte_order.h>
#endif /* REV_ENDIAN_FS */

/*
 * Balloc defines the structure of file system storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 */
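/*
 * Summary of the calling convention, as implemented below: "size" is the
 * amount of the block being requested (it is used to size fragment
 * allocations), "cred" is passed to the underlying ffs_alloc()/
 * ffs_realloccg() calls, and "*bpp" receives the buffer for the data block
 * unless B_NOBUFF is set in "flags".  B_CLRBUF zeroes a newly allocated
 * buffer, B_SYNC forces modified blocks to be written synchronously, and a
 * non-NULL "blk_alloc" is filled in with the number of bytes actually
 * allocated by this call.
 */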
int
ffs_balloc(ip, lbn, size, cred, bpp, flags, blk_alloc)
        register struct inode *ip;
        register ufs_daddr_t lbn;
        int size;
        struct ucred *cred;
        struct buf **bpp;
        int flags;
        int *blk_alloc;
{
        register struct fs *fs;
        register ufs_daddr_t nb;
        struct buf *bp, *nbp;
        struct vnode *vp = ITOV(ip);
        struct indir indirs[NIADDR + 2];
        ufs_daddr_t newb, *bap, pref;
        int deallocated, osize, nsize, num, i, error;
        ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
        int devBlockSize = 0;
        int alloc_buffer = 1;
#if REV_ENDIAN_FS
        struct mount *mp = vp->v_mount;
        int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

        *bpp = NULL;
        if (lbn < 0)
                return (EFBIG);
        fs = ip->i_fs;
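        /*
         * B_NOBUFF means the caller only wants the storage allocated and
         * does not want a buffer returned through *bpp; in that case any
         * buffer used internally is invalidated and written out rather
         * than returned.
         */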
        if (flags & B_NOBUFF)
                alloc_buffer = 0;

        if (blk_alloc)
                *blk_alloc = 0;

        /*
         * If the next write will extend the file into a new block,
         * and the file is currently composed of a fragment,
         * this fragment has to be extended to be a full block.
         */
        nb = lblkno(fs, ip->i_size);
        if (nb < NDADDR && nb < lbn) {
                /*
                 * The file size prior to this write fits in the direct
                 * blocks (i.e. the last block may be a fragment), and we
                 * are now extending the file beyond the block that
                 * currently holds the end of file.
                 */
                osize = blksize(fs, ip, nb);
                /*
                 * osize is the amount of disk space allocated in the last
                 * block: either some number of fragments or a full file
                 * system block.
                 */
                if (osize < fs->fs_bsize && osize > 0) {
                        /*
                         * Only fragments are allocated in the last block and
                         * the current write extends beyond it, so grow that
                         * block to a full file system block; fragments may
                         * only appear in the last block of a file.
                         */
                        error = ffs_realloccg(ip, nb,
                                ffs_blkpref(ip, nb, (int)nb, &ip->i_db[0]),
                                osize, (int)fs->fs_bsize, cred, &bp);
                        if (error)
                                return (error);
                        /* adjust the inode size we just grew */
                        /* it is in nb+1 as nb starts from 0 */
                        ip->i_size = (nb + 1) * fs->fs_bsize;
                        if (UBCISVALID(vp))
                                ubc_setsize(vp, (off_t)ip->i_size); /* XXX check error */
                        ip->i_db[nb] = dbtofsb(fs, bp->b_blkno);
                        ip->i_flag |= IN_CHANGE | IN_UPDATE;
                        if ((flags & B_SYNC) || (!alloc_buffer)) {
                                if (!alloc_buffer)
                                        SET(bp->b_flags, B_INVAL);
                                bwrite(bp);
                        } else
                                bawrite(bp);
                        /* note that bp is already released here */
                }
        }
        /*
         * The first NDADDR blocks are direct blocks.
         */
        if (lbn < NDADDR) {
                nb = ip->i_db[lbn];
                if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
                        if (alloc_buffer) {
                                error = bread(vp, lbn, fs->fs_bsize, NOCRED, &bp);
                                if (error) {
                                        brelse(bp);
                                        return (error);
                                }
                                *bpp = bp;
                        }
                        return (0);
                }
                if (nb != 0) {
                        /*
                         * Consider need to reallocate a fragment.
                         */
                        osize = fragroundup(fs, blkoff(fs, ip->i_size));
                        nsize = fragroundup(fs, size);
                        if (nsize <= osize) {
                                if (alloc_buffer) {
                                        error = bread(vp, lbn, osize, NOCRED, &bp);
                                        if (error) {
                                                brelse(bp);
                                                return (error);
                                        }
                                        ip->i_flag |= IN_CHANGE | IN_UPDATE;
                                        *bpp = bp;
                                        return (0);
                                } else {
                                        ip->i_flag |= IN_CHANGE | IN_UPDATE;
                                        return (0);
                                }
                        } else {
                                error = ffs_realloccg(ip, lbn,
                                        ffs_blkpref(ip, lbn, (int)lbn,
                                            &ip->i_db[0]), osize, nsize, cred, &bp);
                                if (error)
                                        return (error);
                                ip->i_db[lbn] = dbtofsb(fs, bp->b_blkno);
                                ip->i_flag |= IN_CHANGE | IN_UPDATE;
                                if (!alloc_buffer) {
                                        SET(bp->b_flags, B_INVAL);
                                        bwrite(bp);
                                } else
                                        *bpp = bp;
                                return (0);
                        }
                } else {
                        if (ip->i_size < (lbn + 1) * fs->fs_bsize)
                                nsize = fragroundup(fs, size);
                        else
                                nsize = fs->fs_bsize;
                        error = ffs_alloc(ip, lbn,
                                ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
                                nsize, cred, &newb);
                        if (error)
                                return (error);
                        if (alloc_buffer) {
                                bp = getblk(vp, lbn, nsize, 0, 0, BLK_WRITE);
                                bp->b_blkno = fsbtodb(fs, newb);
                                if (flags & B_CLRBUF)
                                        clrbuf(bp);
                        }
                        ip->i_db[lbn] = newb;
                        ip->i_flag |= IN_CHANGE | IN_UPDATE;
                        if (blk_alloc) {
                                *blk_alloc = nsize;
                        }
                        if (alloc_buffer)
                                *bpp = bp;
                        return (0);
                }
        }
        /*
         * Determine the number of levels of indirection.
         */
        pref = 0;
        if (error = ufs_getlbns(vp, lbn, indirs, &num))
                return (error);
#if DIAGNOSTIC
        if (num < 1)
                panic ("ffs_balloc: ufs_bmaparray returned indirect block\n");
#endif
        /*
         * Fetch the first indirect block allocating if necessary.
         */
        --num;
        nb = ip->i_ib[indirs[0].in_off];
        allocib = NULL;
        allocblk = allociblk;
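        /*
         * Each block allocated from here on is recorded in allociblk[] so
         * that the "fail:" path below can free them all if a later
         * allocation fails part way through.
         */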
        if (nb == 0) {
                pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
                if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
                    cred, &newb))
                        return (error);
                nb = newb;
                *allocblk++ = nb;
                bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
                bp->b_blkno = fsbtodb(fs, nb);
                clrbuf(bp);
                /*
                 * Write synchronously so that indirect blocks
                 * never point at garbage.
                 */
                if (error = bwrite(bp))
                        goto fail;
                allocib = &ip->i_ib[indirs[0].in_off];
                *allocib = nb;
                ip->i_flag |= IN_CHANGE | IN_UPDATE;
        }
        /*
         * Fetch through the indirect blocks, allocating as necessary.
         */
        for (i = 1;;) {
                error = meta_bread(vp,
                    indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
                if (error) {
                        brelse(bp);
                        goto fail;
                }
                bap = (ufs_daddr_t *)bp->b_data;
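                /*
                 * On a reverse-endian (MNT_REVEND) mount the on-disk block
                 * pointers are stored in the opposite byte order, so the
                 * indirect block entries are byte-swapped as they are read
                 * or updated.
                 */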
#if REV_ENDIAN_FS
                if (rev_endian)
                        nb = NXSwapLong(bap[indirs[i].in_off]);
                else {
#endif /* REV_ENDIAN_FS */
                        nb = bap[indirs[i].in_off];
#if REV_ENDIAN_FS
                }
#endif /* REV_ENDIAN_FS */
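                /*
                 * When i == num, bap[indirs[i].in_off] is the entry for the
                 * data block itself (nb == 0 if it still has to be
                 * allocated); otherwise it names the next level of indirect
                 * block.
                 */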
                if (i == num)
                        break;
                i += 1;
                if (nb != 0) {
                        brelse(bp);
                        continue;
                }
                if (pref == 0)
                        pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
                if (error =
                    ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
                        brelse(bp);
                        goto fail;
                }
                nb = newb;
                *allocblk++ = nb;
                nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
                nbp->b_blkno = fsbtodb(fs, nb);
                clrbuf(nbp);
                /*
                 * Write synchronously so that indirect blocks
                 * never point at garbage.
                 */
                if (error = bwrite(nbp)) {
                        brelse(bp);
                        goto fail;
                }
#if REV_ENDIAN_FS
                if (rev_endian)
                        bap[indirs[i - 1].in_off] = NXSwapLong(nb);
                else {
#endif /* REV_ENDIAN_FS */
                        bap[indirs[i - 1].in_off] = nb;
#if REV_ENDIAN_FS
                }
#endif /* REV_ENDIAN_FS */
                /*
                 * If required, write synchronously, otherwise use
                 * delayed write.
                 */
                if (flags & B_SYNC) {
                        bwrite(bp);
                } else {
                        bdwrite(bp);
                }
        }
        /*
         * Get the data block, allocating if necessary.
         */
        if (nb == 0) {
                pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
                if (error = ffs_alloc(ip,
                    lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
                        brelse(bp);
                        goto fail;
                }
                nb = newb;
                *allocblk++ = nb;
#if REV_ENDIAN_FS
                if (rev_endian)
                        bap[indirs[i].in_off] = NXSwapLong(nb);
                else {
#endif /* REV_ENDIAN_FS */
                        bap[indirs[i].in_off] = nb;
#if REV_ENDIAN_FS
                }
#endif /* REV_ENDIAN_FS */
                /*
                 * If required, write synchronously, otherwise use
                 * delayed write.
                 */
                if (flags & B_SYNC) {
                        bwrite(bp);
                } else {
                        bdwrite(bp);
                }
                if (alloc_buffer) {
                        nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, BLK_WRITE);
                        nbp->b_blkno = fsbtodb(fs, nb);
                        if (flags & B_CLRBUF)
                                clrbuf(nbp);
                }
                if (blk_alloc) {
                        *blk_alloc = fs->fs_bsize;
                }
                if (alloc_buffer)
                        *bpp = nbp;

                return (0);
        }
        brelse(bp);
        if (alloc_buffer) {
                if (flags & B_CLRBUF) {
                        error = bread(vp, lbn, (int)fs->fs_bsize, NOCRED, &nbp);
                        if (error) {
                                brelse(nbp);
                                goto fail;
                        }
                } else {
                        nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, BLK_WRITE);
                        nbp->b_blkno = fsbtodb(fs, nb);
                }
                *bpp = nbp;
        }
        return (0);
fail:
        /*
         * If we have failed part way through block allocation, we
         * have to deallocate any indirect blocks that we have allocated.
         */
        for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
                ffs_blkfree(ip, *blkp, fs->fs_bsize);
                deallocated += fs->fs_bsize;
        }
        if (allocib != NULL)
                *allocib = 0;
        if (deallocated) {
                VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);

#if QUOTA
                /*
                 * Restore user's disk quota because allocation failed.
                 */
                (void) chkdq(ip, (int64_t)-deallocated, cred, FORCE);
#endif /* QUOTA */
                ip->i_blocks -= btodb(deallocated, devBlockSize);
                ip->i_flag |= IN_CHANGE | IN_UPDATE;
        }
        return (error);
}

/*
 * ffs_blkalloc allocates a disk block for ffs_pageout(); as a consequence
 * it does no breads (those could lead to a deadlock, since the page may
 * already be marked busy while it is being paged out).  Note also that we
 * never grow the file during a pageout, so ip->i_size cannot increase as a
 * result of this call, due to the way UBC works.
 * This code is derived from ffs_balloc, but many of the cases handled in
 * ffs_balloc are not applicable here.
 * Do not call with the B_CLRBUF flag, as this should only be called from
 * pageouts.
 */
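/*
 * ffs_blkalloc returns 0 on success or an errno on failure; unlike
 * ffs_balloc it never returns a buffer to the caller, it only ensures
 * that the data block (and any indirect blocks leading to it) exist
 * on disk.
 */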
int
ffs_blkalloc(ip, lbn, size, cred, flags)
        register struct inode *ip;
        ufs_daddr_t lbn;
        int size;
        struct ucred *cred;
        int flags;
{
        register struct fs *fs;
        register ufs_daddr_t nb;
        struct buf *bp, *nbp;
        struct vnode *vp = ITOV(ip);
        struct indir indirs[NIADDR + 2];
        ufs_daddr_t newb, *bap, pref;
        int deallocated, osize, nsize, num, i, error;
        ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
        int devBlockSize = 0;
#if REV_ENDIAN_FS
        struct mount *mp = vp->v_mount;
        int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

        fs = ip->i_fs;

        if (size > fs->fs_bsize)
                panic("ffs_blkalloc: too large for allocation\n");

        /*
         * If the next write will extend the file into a new block,
         * and the file is currently composed of a fragment,
         * this fragment has to be extended to be a full block.
         */
        nb = lblkno(fs, ip->i_size);
        if (nb < NDADDR && nb < lbn) {
                panic("ffs_blkalloc(): cannot extend file: i_size %d, lbn %d\n",
                    (int)ip->i_size, lbn);
        }
        /*
         * The first NDADDR blocks are direct blocks.
         */
        if (lbn < NDADDR) {
                nb = ip->i_db[lbn];
                if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
                        /* TBD: trivial case; the block is already allocated */
                        return (0);
                }
                if (nb != 0) {
                        /*
                         * Consider need to reallocate a fragment.
                         */
                        osize = fragroundup(fs, blkoff(fs, ip->i_size));
                        nsize = fragroundup(fs, size);
                        if (nsize > osize) {
                                panic("ffs_blkalloc: trying to extend a fragment\n");
                        }
                        return (0);
                } else {
                        if (ip->i_size < (lbn + 1) * fs->fs_bsize)
                                nsize = fragroundup(fs, size);
                        else
                                nsize = fs->fs_bsize;
                        error = ffs_alloc(ip, lbn,
                                ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
                                nsize, cred, &newb);
                        if (error)
                                return (error);
                        ip->i_db[lbn] = newb;
                        ip->i_flag |= IN_CHANGE | IN_UPDATE;
                        return (0);
                }
        }
        /*
         * Determine the number of levels of indirection.
         */
        pref = 0;
        if (error = ufs_getlbns(vp, lbn, indirs, &num))
                return (error);

        if (num == 0) {
                panic("ffs_blkalloc: file with direct blocks only\n");
        }

        /*
         * Fetch the first indirect block allocating if necessary.
         */
        --num;
        nb = ip->i_ib[indirs[0].in_off];
        allocib = NULL;
        allocblk = allociblk;
        if (nb == 0) {
                pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
                if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
                    cred, &newb))
                        return (error);
                nb = newb;
                *allocblk++ = nb;
                bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
                bp->b_blkno = fsbtodb(fs, nb);
                clrbuf(bp);
                /*
                 * Write synchronously so that indirect blocks
                 * never point at garbage.
                 */
                if (error = bwrite(bp))
                        goto fail;
                allocib = &ip->i_ib[indirs[0].in_off];
                *allocib = nb;
                ip->i_flag |= IN_CHANGE | IN_UPDATE;
        }
        /*
         * Fetch through the indirect blocks, allocating as necessary.
         */
        for (i = 1;;) {
                error = meta_bread(vp,
                    indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
                if (error) {
                        brelse(bp);
                        goto fail;
                }
                bap = (ufs_daddr_t *)bp->b_data;
#if REV_ENDIAN_FS
                if (rev_endian)
                        nb = NXSwapLong(bap[indirs[i].in_off]);
                else {
#endif /* REV_ENDIAN_FS */
                        nb = bap[indirs[i].in_off];
#if REV_ENDIAN_FS
                }
#endif /* REV_ENDIAN_FS */
                if (i == num)
                        break;
                i += 1;
                if (nb != 0) {
                        brelse(bp);
                        continue;
                }
                if (pref == 0)
                        pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
                if (error =
                    ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
                        brelse(bp);
                        goto fail;
                }
                nb = newb;
                *allocblk++ = nb;
                nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
                nbp->b_blkno = fsbtodb(fs, nb);
                clrbuf(nbp);
                /*
                 * Write synchronously so that indirect blocks
                 * never point at garbage.
                 */
                if (error = bwrite(nbp)) {
                        brelse(bp);
                        goto fail;
                }
#if REV_ENDIAN_FS
                if (rev_endian)
                        bap[indirs[i - 1].in_off] = NXSwapLong(nb);
                else {
#endif /* REV_ENDIAN_FS */
                        bap[indirs[i - 1].in_off] = nb;
#if REV_ENDIAN_FS
                }
#endif /* REV_ENDIAN_FS */
                /*
                 * If required, write synchronously, otherwise use
                 * delayed write.
                 */
                if (flags & B_SYNC) {
                        bwrite(bp);
                } else {
                        bdwrite(bp);
                }
        }
        /*
         * Get the data block, allocating if necessary.
         */
        if (nb == 0) {
                pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
                if (error = ffs_alloc(ip,
                    lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
                        brelse(bp);
                        goto fail;
                }
                nb = newb;
                *allocblk++ = nb;
#if REV_ENDIAN_FS
                if (rev_endian)
                        bap[indirs[i].in_off] = NXSwapLong(nb);
                else {
#endif /* REV_ENDIAN_FS */
                        bap[indirs[i].in_off] = nb;
#if REV_ENDIAN_FS
                }
#endif /* REV_ENDIAN_FS */
                /*
                 * If required, write synchronously, otherwise use
                 * delayed write.
                 */
                if (flags & B_SYNC) {
                        bwrite(bp);
                } else {
                        bdwrite(bp);
                }
                return (0);
        }
        brelse(bp);
        return (0);
fail:
        /*
         * If we have failed part way through block allocation, we
         * have to deallocate any indirect blocks that we have allocated.
         */
        for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
                ffs_blkfree(ip, *blkp, fs->fs_bsize);
                deallocated += fs->fs_bsize;
        }
        if (allocib != NULL)
                *allocib = 0;
        if (deallocated) {
                VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);

#if QUOTA
                /*
                 * Restore user's disk quota because allocation failed.
                 */
                (void) chkdq(ip, (int64_t)-deallocated, cred, FORCE);
#endif /* QUOTA */
                ip->i_blocks -= btodb(deallocated, devBlockSize);
                ip->i_flag |= IN_CHANGE | IN_UPDATE;
        }
        return (error);
}