/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_balloc.c	8.8 (Berkeley) 6/16/95
 */

#include <rev_endian_fs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/ubc.h>
#if REV_ENDIAN_FS
#include <sys/mount.h>
#endif /* REV_ENDIAN_FS */

#include <sys/vm.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#if REV_ENDIAN_FS
#include <ufs/ufs/ufs_byte_order.h>
#include <architecture/byte_order.h>
#endif /* REV_ENDIAN_FS */

/*
 * Balloc defines the structure of file system storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 */
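/*
 * Illustrative sketch (not part of the original source): a write path such
 * as ffs_write() would typically call ffs_balloc() once per logical block it
 * is about to dirty, roughly along the lines below.  The exact argument
 * names, flag selection, and uio handling here are assumptions, not a
 * verbatim excerpt of any caller:
 *
 *	struct buf *bp;
 *	int blkalloc;
 *	error = ffs_balloc(ip, lbn, blkoffset + xfersize, cred, &bp,
 *	    flags, &blkalloc);
 *	if (error)
 *		return (error);
 *	...copy the user data into bp->b_data, then bdwrite() or bwrite()...
 */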
ffs_balloc(ip, lbn, size, cred, bpp, flags, blk_alloc)
	register struct inode *ip;
	register ufs_daddr_t lbn;
	int size;
	struct ucred *cred;
	struct buf **bpp;
	int flags;
	int * blk_alloc;
{
	register struct fs *fs;
	register ufs_daddr_t nb;
	struct buf *bp, *nbp;
	struct vnode *vp = ITOV(ip);
	struct indir indirs[NIADDR + 2];
	ufs_daddr_t newb, *bap, pref;
	int deallocated, osize, nsize, num, i, error;
	ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
	int devBlockSize=0;
	int alloc_buffer = 1;
#if REV_ENDIAN_FS
	struct mount *mp=vp->v_mount;
	int rev_endian=(mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	*bpp = NULL;
	if (lbn < 0)
		return (EFBIG);
	fs = ip->i_fs;
	if (flags & B_NOBUFF)
		alloc_buffer = 0;

	if (blk_alloc)
		*blk_alloc = 0;

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment,
	 * this fragment has to be extended to be a full block.
	 */
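	/*
	 * Worked example (illustrative, assuming an 8K-block / 1K-fragment
	 * file system): a 5K file occupies five 1K fragments in direct
	 * block 0, so lblkno(fs, i_size) == 0 and blksize() reports 5K.
	 * A write at lbn 1 or beyond must first grow block 0 to a full 8K
	 * block via ffs_realloccg() below, since fragments may only live
	 * in the last block of a file.
	 */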
	nb = lblkno(fs, ip->i_size);
	if (nb < NDADDR && nb < lbn) {
		/* The file size prior to this write fits in the direct
		 * blocks (i.e. the last block may be fragmented), and
		 * this write extends the file beyond the block that
		 * currently holds the end of file.
		 */
		osize = blksize(fs, ip, nb);
		/* osize is the space allocated on disk in the last block;
		 * it is either a number of fragments or a full file
		 * system block. */
		if (osize < fs->fs_bsize && osize > 0) {
			/* Only some fragments of the last block are
			 * allocated.  Since this write extends beyond that
			 * block, grow it to a complete block; fragments may
			 * only exist in the last block of a file.
			 */
			error = ffs_realloccg(ip, nb,
				ffs_blkpref(ip, nb, (int)nb, &ip->i_db[0]),
				osize, (int)fs->fs_bsize, cred, &bp);
			if (error)
				return (error);
			/* adjust the inode size for the block we just grew */
			/* it is nb+1 blocks, as nb starts from 0 */
			ip->i_size = (nb + 1) * fs->fs_bsize;
			if (UBCISVALID(vp))
				ubc_setsize(vp, (off_t)ip->i_size); /* XXX check error */
			ip->i_db[nb] = dbtofsb(fs, bp->b_blkno);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if ((flags & B_SYNC) || (!alloc_buffer)) {
				if (!alloc_buffer)
					SET(bp->b_flags, B_INVAL);
				bwrite(bp);
			} else
				bawrite(bp);
			/* note that bp is already released here */
		}
	}
	/*
	 * The first NDADDR blocks are direct blocks.
	 */
	if (lbn < NDADDR) {
		nb = ip->i_db[lbn];
		if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
			if (alloc_buffer) {
				error = bread(vp, lbn, fs->fs_bsize, NOCRED, &bp);
				if (error) {
					brelse(bp);
					return (error);
				}
				*bpp = bp;
			}
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {
				if (alloc_buffer) {
					error = bread(vp, lbn, osize, NOCRED, &bp);
					if (error) {
						brelse(bp);
						return (error);
					}
					ip->i_flag |= IN_CHANGE | IN_UPDATE;
					*bpp = bp;
					return (0);
				} else {
					ip->i_flag |= IN_CHANGE | IN_UPDATE;
					return (0);
				}
			} else {
				error = ffs_realloccg(ip, lbn,
					ffs_blkpref(ip, lbn, (int)lbn,
					    &ip->i_db[0]), osize, nsize, cred, &bp);
				if (error)
					return (error);
				ip->i_db[lbn] = dbtofsb(fs, bp->b_blkno);
				ip->i_flag |= IN_CHANGE | IN_UPDATE;
				if (!alloc_buffer) {
					SET(bp->b_flags, B_INVAL);
					bwrite(bp);
				} else
					*bpp = bp;
				return (0);
			}
		} else {
			if (ip->i_size < (lbn + 1) * fs->fs_bsize)
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
				ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
				nsize, cred, &newb);
			if (error)
				return (error);
			if (alloc_buffer) {
				bp = getblk(vp, lbn, nsize, 0, 0, BLK_WRITE);
				bp->b_blkno = fsbtodb(fs, newb);
				if (flags & B_CLRBUF)
					clrbuf(bp);
			}
			ip->i_db[lbn] = newb;
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (blk_alloc) {
				*blk_alloc = nsize;
			}
			if (alloc_buffer)
				*bpp = bp;
			return (0);
		}
	}
	/*
	 * Determine the number of levels of indirection.
	 */
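	/*
	 * Descriptive note: ufs_getlbns() fills indirs[] with the chain of
	 * indirect blocks leading to lbn and sets num to the number of
	 * entries.  indirs[0].in_off indexes the inode's i_ib[] array,
	 * indirs[1].in_lbn names the top-level indirect block, and the last
	 * entry's in_off locates the data block's pointer slot.  As the loop
	 * below assumes, a block reached through a single indirect block
	 * yields num == 2, a double-indirect block num == 3, and so on.
	 */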
	pref = 0;
	if (error = ufs_getlbns(vp, lbn, indirs, &num))
		return (error);
#if DIAGNOSTIC
	if (num < 1)
		panic("ffs_balloc: ufs_bmaparray returned indirect block\n");
#endif
	/*
	 * Fetch the first indirect block, allocating if necessary.
	 */
	--num;
	nb = ip->i_ib[indirs[0].in_off];
	allocib = NULL;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
		if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    cred, &newb))
			return (error);
		nb = newb;
		*allocblk++ = nb;
		bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if (error = bwrite(bp))
			goto fail;
		allocib = &ip->i_ib[indirs[0].in_off];
		*allocib = nb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
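	/*
	 * Descriptive note: at the top of each pass, bp holds the indirect
	 * block at level i and bap its array of block pointers.  If the
	 * entry for the next level is missing, a fresh indirect block is
	 * allocated, cleared, and written synchronously before it is linked
	 * into its parent, so an on-disk parent never points at an
	 * uninitialized child.
	 */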
	for (i = 1;;) {
		error = meta_bread(vp,
		    indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			goto fail;
		}
		bap = (ufs_daddr_t *)bp->b_data;
#if REV_ENDIAN_FS
		if (rev_endian)
			nb = NXSwapLong(bap[indirs[i].in_off]);
		else {
#endif /* REV_ENDIAN_FS */
			nb = bap[indirs[i].in_off];
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		if (i == num)
			break;
		i += 1;
		if (nb != 0) {
			brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
		if (error =
		    ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if (error = bwrite(nbp)) {
			brelse(bp);
			goto fail;
		}
#if REV_ENDIAN_FS
		if (rev_endian)
			bap[indirs[i - 1].in_off] = NXSwapLong(nb);
		else {
#endif /* REV_ENDIAN_FS */
			bap[indirs[i - 1].in_off] = nb;
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			bdwrite(bp);
		}
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
		if (error = ffs_alloc(ip,
		    lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
#if REV_ENDIAN_FS
		if (rev_endian)
			bap[indirs[i].in_off] = NXSwapLong(nb);
		else {
#endif /* REV_ENDIAN_FS */
			bap[indirs[i].in_off] = nb;
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			bdwrite(bp);
		}
		if (alloc_buffer) {
			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, BLK_WRITE);
			nbp->b_blkno = fsbtodb(fs, nb);
			if (flags & B_CLRBUF)
				clrbuf(nbp);
		}
		if (blk_alloc) {
			*blk_alloc = fs->fs_bsize;
		}
		if (alloc_buffer)
			*bpp = nbp;

		return (0);
	}
	brelse(bp);
	if (alloc_buffer) {
		if (flags & B_CLRBUF) {
			error = bread(vp, lbn, (int)fs->fs_bsize, NOCRED, &nbp);
			if (error) {
				brelse(nbp);
				goto fail;
			}
		} else {
			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, BLK_WRITE);
			nbp->b_blkno = fsbtodb(fs, nb);
		}
		*bpp = nbp;
	}
	return (0);
fail:
	/*
	 * If we have failed part way through block allocation, we
	 * have to deallocate any indirect blocks that we have allocated.
	 */
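	/*
	 * Descriptive note: allociblk[] recorded every block this call
	 * allocated (indirect blocks and possibly the data block); walking
	 * it up to allocblk frees exactly those blocks, and clearing
	 * *allocib unhooks the newly allocated top-level indirect pointer
	 * from the inode.
	 */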
	for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
		ffs_blkfree(ip, *blkp, fs->fs_bsize);
		deallocated += fs->fs_bsize;
	}
	if (allocib != NULL)
		*allocib = 0;
	if (deallocated) {
		VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);

#if QUOTA
		/*
		 * Restore user's disk quota because allocation failed.
		 */
		(void) chkdq(ip, (long)-btodb(deallocated, devBlockSize), cred, FORCE);
#endif /* QUOTA */
		ip->i_blocks -= btodb(deallocated, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	return (error);
}

/*
 * ffs_blkalloc allocates a disk block for ffs_pageout(); as a consequence
 * it does no breads (those could lead to deadlock, as the page may already
 * be marked busy while it is being paged out).  Also note that pageouts do
 * not grow the file, so ip->i_size cannot increase through this call due
 * to the way UBC works.
 * This code is derived from ffs_balloc; many cases that ffs_balloc handles
 * are not applicable here.
 * Do not call with the B_CLRBUF flag, as this should only be called
 * from pageouts.
 */
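/*
 * Illustrative sketch (assumption, not part of the original source): the
 * pageout path is expected to back each block it is about to push with a
 * call along these lines before issuing the I/O:
 *
 *	error = ffs_blkalloc(ip, lbn, xfer_size, cred, flags);
 *
 * The argument names above are hypothetical; the point is that the caller
 * only needs the on-disk block to exist, so no buffer is returned.
 */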
ffs_blkalloc(ip, lbn, size, cred, flags)
	register struct inode *ip;
	ufs_daddr_t lbn;
	int size;
	struct ucred *cred;
	int flags;
{
	register struct fs *fs;
	register ufs_daddr_t nb;
	struct buf *bp, *nbp;
	struct vnode *vp = ITOV(ip);
	struct indir indirs[NIADDR + 2];
	ufs_daddr_t newb, *bap, pref;
	int deallocated, osize, nsize, num, i, error;
	ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
	int devBlockSize=0;
#if REV_ENDIAN_FS
	struct mount *mp=vp->v_mount;
	int rev_endian=(mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;

	if (size > fs->fs_bsize)
		panic("ffs_blkalloc: too large for allocation\n");

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment,
	 * this fragment has to be extended to be a full block.
	 */
	nb = lblkno(fs, ip->i_size);
	if (nb < NDADDR && nb < lbn) {
		panic("ffs_blkalloc(): cannot extend file: i_size %d, lbn %d\n", ip->i_size, lbn);
	}
	/*
	 * The first NDADDR blocks are direct blocks.
	 */
	if (lbn < NDADDR) {
		nb = ip->i_db[lbn];
		if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
			/* TBD: trivial case; the block is already allocated */
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_size));
			nsize = fragroundup(fs, size);
			if (nsize > osize) {
				panic("ffs_blkalloc: trying to extend a fragment\n");
			}
			return (0);
		} else {
			if (ip->i_size < (lbn + 1) * fs->fs_bsize)
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
				ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
				nsize, cred, &newb);
			if (error)
				return (error);
			ip->i_db[lbn] = newb;
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (0);
		}
	}
	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if (error = ufs_getlbns(vp, lbn, indirs, &num))
		return (error);

	if (num == 0) {
		panic("ffs_blkalloc: file with direct blocks only\n");
	}

	/*
	 * Fetch the first indirect block, allocating if necessary.
	 */
	--num;
	nb = ip->i_ib[indirs[0].in_off];
	allocib = NULL;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
		if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    cred, &newb))
			return (error);
		nb = newb;
		*allocblk++ = nb;
		bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if (error = bwrite(bp))
			goto fail;
		allocib = &ip->i_ib[indirs[0].in_off];
		*allocib = nb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
	for (i = 1;;) {
		error = meta_bread(vp,
		    indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			goto fail;
		}
		bap = (ufs_daddr_t *)bp->b_data;
#if REV_ENDIAN_FS
		if (rev_endian)
			nb = NXSwapLong(bap[indirs[i].in_off]);
		else {
#endif /* REV_ENDIAN_FS */
			nb = bap[indirs[i].in_off];
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		if (i == num)
			break;
		i += 1;
		if (nb != 0) {
			brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
		if (error =
		    ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, BLK_META);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if (error = bwrite(nbp)) {
			brelse(bp);
			goto fail;
		}
#if REV_ENDIAN_FS
		if (rev_endian)
			bap[indirs[i - 1].in_off] = NXSwapLong(nb);
		else {
#endif /* REV_ENDIAN_FS */
			bap[indirs[i - 1].in_off] = nb;
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			bdwrite(bp);
		}
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
		if (error = ffs_alloc(ip,
		    lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
#if REV_ENDIAN_FS
		if (rev_endian)
			bap[indirs[i].in_off] = NXSwapLong(nb);
		else {
#endif /* REV_ENDIAN_FS */
			bap[indirs[i].in_off] = nb;
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			bdwrite(bp);
		}
		return (0);
	}
	brelse(bp);
	return (0);
fail:
	/*
	 * If we have failed part way through block allocation, we
	 * have to deallocate any indirect blocks that we have allocated.
	 */
	for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
		ffs_blkfree(ip, *blkp, fs->fs_bsize);
		deallocated += fs->fs_bsize;
	}
	if (allocib != NULL)
		*allocib = 0;
	if (deallocated) {
		VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);

#if QUOTA
		/*
		 * Restore user's disk quota because allocation failed.
		 */
		(void) chkdq(ip, (long)-btodb(deallocated, devBlockSize), cred, FORCE);
#endif /* QUOTA */
		ip->i_blocks -= btodb(deallocated, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	return (error);
}