/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 */
#include <rev_endian_fs.h>
#include <vm/vm_pager.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/quota.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#if REV_ENDIAN_FS
#include <ufs/ufs/ufs_byte_order.h>
#include <architecture/byte_order.h>
#endif /* REV_ENDIAN_FS */
extern u_long nextgennumber;

static ufs_daddr_t ffs_alloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_alloccgblk __P((struct fs *, struct cg *, ufs_daddr_t));
static ufs_daddr_t ffs_clusteralloc __P((struct inode *, int, ufs_daddr_t, int));
static ino_t ffs_dirpref __P((struct fs *));
static ufs_daddr_t ffs_fragextend __P((struct inode *, int, long, int, int));
static void ffs_fserr __P((struct fs *, u_int, char *));
static u_long ffs_hashalloc
		__P((struct inode *, int, long, int, u_int32_t (*)()));
static ino_t ffs_nodealloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_mapsearch __P((struct fs *, struct cg *, ufs_daddr_t, int));
/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
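/*
 * For example (illustrative numbers only): with fs_fsize = 1024 and
 * fs_bsize = 8192, legal request sizes are 1024, 2048, ..., 8192
 * bytes; a request of, say, 3000 bytes fails the fragoff() check
 * below and panics under DIAGNOSTIC.
 */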
int
ffs_alloc(ip, lbn, bpref, size, cred, bnp)
	register struct inode *ip;
	ufs_daddr_t lbn, bpref;
	int size;
	struct ucred *cred;
	ufs_daddr_t *bnp;
{
	register struct fs *fs;
	ufs_daddr_t bno;
	int cg, error;
	int devBlockSize = 0;

	*bnp = 0;
	fs = ip->i_fs;
#if DIAGNOSTIC
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential\n");
#endif /* DIAGNOSTIC */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);
#if QUOTA
	if (error = chkdq(ip, (int64_t)size, cred, 0))
		return (error);
#endif /* QUOTA */
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
	    (u_int32_t (*)())ffs_alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
#if QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (int64_t)-size, cred, FORCE);
#endif /* QUOTA */
nospace:
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
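/*
 * For example (illustrative numbers only): growing a file's last
 * partial block from osize = 2048 to nsize = 4096 with 1K frags
 * first asks ffs_fragextend() for the two frags directly following
 * the old ones; only if those are taken is a new location allocated
 * and the old frags freed.
 */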
int
ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
	register struct inode *ip;
	ufs_daddr_t lbprev;
	ufs_daddr_t bpref;
	int osize, nsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp;
	int cg, request, error;
	ufs_daddr_t bprev, bno;
	int devBlockSize = 0;

	*bpp = 0;
	fs = ip->i_fs;
#if DIAGNOSTIC
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		    "dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential\n");
#endif /* DIAGNOSTIC */
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	if (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
	VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);
#if QUOTA
	if (error = chkdq(ip, (int64_t)(nsize - osize), cred, 0)) {
		brelse(bp);
		return (error);
	}
#endif /* QUOTA */
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	if (bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize)) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("bad blockno");
		ip->i_blocks += btodb(nsize - osize, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree < 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%x, optim = %d, fs = %s\n",
		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
	    (u_int32_t (*)())ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		ffs_blkfree(ip, bprev, (long)osize);
		if (nsize < request)
			ffs_blkfree(ip, bno + numfrags(fs, nsize),
			    (long)(request - nsize));
		ip->i_blocks += btodb(nsize - osize, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
#if QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (int64_t)-(nsize - osize), cred, FORCE);
#endif /* QUOTA */
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range. If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation. If unsuccessful, the allocation
 * is left unchanged. The success in doing the reallocation is returned.
 * Note that the error return is not reflected back to the user. Rather
 * the previous block allocation will be used.
 */
/* Note: This routine is unused in UBC cluster I/O */
int doreallocblks = 1;
int
ffs_reallocblks(ap)
	struct vop_reallocblks_args *ap;
{
	return (ENOSPC);
}
/*
 * Allocate an inode in the file system.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(ap)
	struct vop_valloc_args /* {
		struct vnode *a_pvp;
		int a_mode;
		struct ucred *a_cred;
		struct vnode **a_vpp;
	} */ *ap;
{
	register struct vnode *pvp = ap->a_pvp;
	register struct inode *pip;
	register struct fs *fs;
	register struct inode *ip;
	mode_t mode = ap->a_mode;
	ino_t ino, ipref;
	int cg, error;

	*ap->a_vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(fs);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode, ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, ap->a_vpp);
	if (error) {
		VOP_VFREE(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*ap->a_vpp);
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_blocks) {				/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	/*
	 * Set up a new generation number for this inode.
	 */
	if (++nextgennumber < (u_long)time.tv_sec)
		nextgennumber = time.tv_sec;
	ip->i_gen = nextgennumber;
	return (0);
noinodes:
	ffs_fserr(fs, ap->a_cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}
/*
 * Find a cylinder to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 */
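/*
 * For example (illustrative numbers only): with three cylinder
 * groups holding (nifree, ndir) = (50, 10), (200, 40) and (180, 5),
 * the average is ~143 free inodes, so only the last two qualify,
 * and the third is chosen because it holds the fewest directories.
 */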
static ino_t
ffs_dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return ((ino_t)(fs->fs_ipg * mincg));
}
/*
 * Select the desired position for the next block in a file. The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
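/*
 * For example (illustrative numbers only): with 12 direct blocks and
 * fs_maxbpg = 2048, logical blocks 0-11 form the first section and
 * each later section covers roughly 2048 blocks; crossing into a new
 * section (the indx % fs_maxbpg == 0 test below) triggers the rotor
 * sweep for a cylinder group with above-average free blocks.
 */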
ufs_daddr_t
ffs_blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_daddr_t lbn;
	int indx;
	ufs_daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	ufs_daddr_t nextblk;
#if REV_ENDIAN_FS
	daddr_t prev = 0;
	struct vnode *vp = ITOV(ip);
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
#if REV_ENDIAN_FS
	if (indx && rev_endian) {
		if (bap != &ip->i_db[0])
			prev = NXSwapLong(bap[indx - 1]);
		else
			prev = bap[indx - 1];
	} else prev = bap[indx - 1];
#endif /* REV_ENDIAN_FS */
#if REV_ENDIAN_FS
	if (indx % fs->fs_maxbpg == 0 || prev == 0) {
#else /* REV_ENDIAN_FS */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
#endif /* REV_ENDIAN_FS */
		if (lbn < NDADDR) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
#if REV_ENDIAN_FS
		if (indx == 0 || prev == 0)
#else /* REV_ENDIAN_FS */
		if (indx == 0 || bap[indx - 1] == 0)
#endif /* REV_ENDIAN_FS */
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
#if REV_ENDIAN_FS
			startcg = dtog(fs, prev) + 1;
#else /* REV_ENDIAN_FS */
			startcg = dtog(fs, bap[indx - 1]) + 1;
#endif /* REV_ENDIAN_FS */
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
#if REV_ENDIAN_FS
	if (rev_endian) {
		nextblk = prev + fs->fs_frag;
		if (indx < fs->fs_maxcontig) {
			return (nextblk);
		}
		if (bap != &ip->i_db[0])
			prev = NXSwapLong(bap[indx - fs->fs_maxcontig]);
		else
			prev = bap[indx - fs->fs_maxcontig];
		if (prev + blkstofrags(fs, fs->fs_maxcontig) != nextblk)
			return (nextblk);
	} else {
#endif /* REV_ENDIAN_FS */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx < fs->fs_maxcontig || bap[indx - fs->fs_maxcontig] +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
#if REV_ENDIAN_FS
	}
#endif /* REV_ENDIAN_FS */
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 * (frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
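		/*
		 * Worked example (illustrative numbers only): with
		 * fs_rotdelay = 4 ms, fs_rps = 60, fs_nsect = 32 and
		 * NSPF(fs) = 2, the delay is 4*60*32 / (2*1000) = 3
		 * frags after integer division, which roundup() then
		 * raises to one full 8-frag block.
		 */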
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}
/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
static u_long
ffs_hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_int32_t (*allocator)();
{
	register struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
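	/*
	 * Illustrative trace: starting from icg = 5 with fs_ncg = 32,
	 * the rehash below probes groups 6, 8, 12, 20 and 4 (offsets
	 * 1, 3, 7, 15, 31 from icg, wrapped mod fs_ncg).
	 */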
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);
}
/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
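/*
 * For example (illustrative numbers only): extending osize = 2048 to
 * nsize = 4096 with 1K frags asks for the two frags at bprev+2 and
 * bprev+3; both must be free in the cylinder group map, and the
 * extension may not cross a block boundary.
 */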
static ufs_daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;
#if REV_ENDIAN_FS
	struct vnode *vp = ITOV(ip);
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);	/* number of fragments needed */
	bbase = fragnum(fs, bprev);	/* offset of bprev within its block */
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	/* read corresponding cylinder group info */
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp, fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp)) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
#if REV_ENDIAN_FS
			if (rev_endian)
				byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
	return (bprev);
}
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static ufs_daddr_t
ffs_alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	ufs_daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;
#if REV_ENDIAN_FS
	struct vnode *vp = ITOV(ip);
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp, fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(fs, cgp, bpref);
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
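	/*
	 * For example (illustrative numbers only): a request for 3
	 * frags when cg_frsum[3] == 0 but cg_frsum[5] > 0 carves the
	 * 3 frags out of a free 5-frag run, leaving a 2-frag run
	 * behind (cg_frsum[5]--, cg_frsum[2]++ below).
	 */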
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
#if REV_ENDIAN_FS
			if (rev_endian)
				byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
			brelse(bp);
			return (NULL);
		}
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return (NULL);
	}
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static ufs_daddr_t
ffs_alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	ufs_daddr_t bpref;
{
	ufs_daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one
		 * we just allocated. Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block. A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, cg_blksfree(cgp), (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}
/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
 */
static ufs_daddr_t
ffs_clusteralloc(ip, cg, bpref, len)
	struct inode *ip;
	int cg;
	ufs_daddr_t bpref;
	int len;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int i, got, run, bno, bit, map;
	u_char *mapp;
	int32_t *lp;
#if REV_ENDIAN_FS
	struct vnode *vp = ITOV(ip);
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	if (fs->fs_maxcluster[cg] < len)
		return (NULL);
	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NOCRED, &bp))
		goto fail;
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp, fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp)) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		goto fail;
	}
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &cg_clustersum(cgp)[len];
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->fs_contigsumsize) {
		/*
		 * This is the first time looking for a cluster in this
		 * cylinder group. Update the cluster summary information
		 * to reflect the true maximum sized cluster so that
		 * future cluster allocation requests can avoid reading
		 * the cylinder group map only to find no clusters.
		 */
		lp = &cg_clustersum(cgp)[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->fs_maxcluster[cg] = i;
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		goto fail;
	}
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation. We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read). We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	else
		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (got == cgp->cg_nclusterblks) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		goto fail;
	}
	/*
	 * Allocate the cluster that we have found.
	 */
	for (i = 1; i <= len; i++)
		if (!ffs_isblock(fs, cg_blksfree(cgp), got - run + i))
			panic("ffs_clusteralloc: map mismatch");
	bno = cg * fs->fs_fpg + blkstofrags(fs, got - run + 1);
	if (dtog(fs, bno) != cg)
		panic("ffs_clusteralloc: allocated out of group");
	len = blkstofrags(fs, len);
	for (i = 0; i < len; i += fs->fs_frag)
		if ((got = ffs_alloccgblk(fs, cgp, bno + i)) != bno + i)
			panic("ffs_clusteralloc: lost block");
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
	return (bno);

fail:
	brelse(bp);
	return (0);
}
/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
static ino_t
ffs_nodealloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	ufs_daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, start, len, loc, map, i;
#if REV_ENDIAN_FS
	struct vnode *vp = ITOV(ip);
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp, fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cg_inosused(cgp), ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %d, fs = %s\n",
			    cg, cgp->cg_irotor, fs->fs_fsmnt);
			panic("ffs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = cg_inosused(cgp)[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ffs_nodealloccg: block not in map");
	/* NOTREACHED */
gotit:
	setbit(cg_inosused(cgp), ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
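/*
 * For example (illustrative numbers only): freeing the last busy 1K
 * frag of an 8K block flips all eight free-map bits on; the
 * reassembly check below then converts the eight frag credits into
 * one full free block (cs_nffree -= fs_frag, cs_nbfree++).
 */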
void
ffs_blkfree(ip, bno, size)
	register struct inode *ip;
	ufs_daddr_t bno;
	long size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	ufs_daddr_t blkno;
	int i, error, cg, blk, frags, bbase;
#if REV_ENDIAN_FS
	struct vnode *vp = ITOV(ip);
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->fs_size) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		ffs_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp, fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp)) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		blkno = fragstoblks(fs, bno);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		ffs_setblock(fs, cg_blksfree(cgp), blkno);
		ffs_clusteracct(fs, cgp, blkno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		blkno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, blkno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod = 1;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
}
#if DIAGNOSTIC
/*
 * Verify allocation of a block or fragment. Returns true if block or
 * fragment is allocated, false if it is free.
 */
int
ffs_checkblk(ip, bno, size)
	struct inode *ip;
	ufs_daddr_t bno;
	long size;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	int i, error, frags, free;
#if REV_ENDIAN_FS
	struct vnode *vp = ITOV(ip);
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("bsize = %d, size = %d, fs = %s\n",
		    fs->fs_bsize, size, fs->fs_fsmnt);
		panic("checkblk: bad size");
	}
	if ((u_int)bno >= fs->fs_size)
		panic("checkblk: bad block %d", bno);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp, fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp)) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return (0);
	}
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		free = ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
	} else {
		frags = numfrags(fs, size);
		for (free = 0, i = 0; i < frags; i++)
			if (isset(cg_blksfree(cgp), bno + i))
				free++;
		if (free != 0 && free != frags)
			panic("checkblk: partially free fragment");
	}
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
	brelse(bp);
	return (!free);
}
#endif /* DIAGNOSTIC */
/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
int
ffs_vfree(ap)
	struct vop_vfree_args /* {
		struct vnode *a_pvp;
		ino_t a_ino;
		int a_mode;
	} */ *ap;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct inode *pip;
	ino_t ino = ap->a_ino;
	struct buf *bp;
	int error, cg;
#if REV_ENDIAN_FS
	struct vnode *vp = ap->a_pvp;
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	pip = VTOI(ap->a_pvp);
	fs = pip->i_fs;
	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
		panic("ifree: range: dev = 0x%x, ino = %d, fs = %s\n",
		    pip->i_dev, ino, fs->fs_fsmnt);
	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp, fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp)) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time.tv_sec;
	ino %= fs->fs_ipg;
	if (isclr(cg_inosused(cgp), ino)) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    pip->i_dev, ino, fs->fs_fsmnt);
		if (fs->fs_ronly == 0)
			panic("ifree: freeing free inode");
	}
	clrbit(cg_inosused(cgp), ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((ap->a_mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod = 1;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
	return (0);
}
/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static ufs_daddr_t
ffs_mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	ufs_daddr_t bpref;
	int allocsiz;
{
	ufs_daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[start],
		(u_char *)fragtbl[fs->fs_frag],
		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[0],
			(u_char *)fragtbl[fs->fs_frag],
			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("ffs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("ffs_alloccg: block not in map");
	return (-1);
}
/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
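/*
 * For example (illustrative numbers only): freeing a block whose
 * neighbors form free runs of back = 2 blocks before it and
 * forw = 3 after it joins them into one cluster of
 * back + forw + 1 = 6, so sump[2] and sump[3] are decremented and
 * sump[6] incremented below.
 */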
void
ffs_clusteracct(fs, cgp, blkno, cnt)
	struct fs *fs;
	struct cg *cgp;
	ufs_daddr_t blkno;
	int cnt;
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp);
	sump = cg_clustersum(cgp);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if (end >= cgp->cg_nclusterblks)
		end = cgp->cg_nclusterblks;
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	sump[i] += cnt;
	if (back > 0)
		sump[back] -= cnt;
	if (forw > 0)
		sump[forw] -= cnt;
	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (*lp-- > 0)
			break;
	fs->fs_maxcluster[cgp->cg_cgx] = i;
}
/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ffs_fserr(fs, uid, cp)
	struct fs *fs;
	u_int uid;
	char *cp;
{

	log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp);
}