/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 */
#include <rev_endian_fs.h>
#include <vm/vm_pager.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#if REV_ENDIAN_FS
#include <ufs/ufs/ufs_byte_order.h>
#include <architecture/byte_order.h>
#endif /* REV_ENDIAN_FS */
extern u_long nextgennumber;

static ufs_daddr_t ffs_alloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_alloccgblk __P((struct fs *, struct cg *, ufs_daddr_t));
static ufs_daddr_t ffs_clusteralloc __P((struct inode *, int, ufs_daddr_t,
	    int));
static ino_t ffs_dirpref __P((struct fs *));
static ufs_daddr_t ffs_fragextend __P((struct inode *, int, long, int, int));
static void ffs_fserr __P((struct fs *, u_int, char *));
static u_long ffs_hashalloc
	    __P((struct inode *, int, long, int, u_int32_t (*)()));
static ino_t ffs_nodealloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_mapsearch __P((struct fs *, struct cg *, ufs_daddr_t,
	    int));
/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
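/*
 * Illustrative sketch (not part of the original source): the size
 * constraint stated above, checked in isolation.  With a hypothetical
 * fs_fsize of 1024 and fs_bsize of 8192, a request is well formed only
 * if it is a positive multiple of 1024 no larger than 8192; this is
 * the same test ffs_alloc() performs with fragoff() below.
 */
#if 0	/* example only, never compiled */
static int
example_size_is_valid(u_int size, u_int fsize, u_int bsize)
{
	/* must be a whole number of fragments... */
	if (size == 0 || size % fsize != 0)
		return (0);
	/* ...and no larger than one full block */
	return (size <= bsize);
}
#endif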
int
ffs_alloc(ip, lbn, bpref, size, cred, bnp)
	register struct inode *ip;
	ufs_daddr_t lbn, bpref;
	int size;
	struct ucred *cred;
	ufs_daddr_t *bnp;
{
	register struct fs *fs;
	ufs_daddr_t bno;
	int cg, error;
	int devBlockSize = 0;

	*bnp = 0;
	fs = ip->i_fs;
#if DIAGNOSTIC
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential\n");
#endif /* DIAGNOSTIC */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);
	if (error = chkdq(ip, (long)btodb(size, devBlockSize), cred, 0))
		return (error);
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
	    (u_int32_t (*)())ffs_alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(size, devBlockSize), cred, FORCE);
nospace:
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
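/*
 * Illustrative sketch (assumption, not original code): the
 * extend-in-place-or-relocate pattern that ffs_realloccg() follows,
 * reduced to its control flow.  try_extend() and relocate() are
 * hypothetical stand-ins for ffs_fragextend() and the hashed block
 * allocator used below.
 */
#if 0	/* example only, never compiled */
static int
example_grow(int blkno, int osize, int nsize)
{
	int bno;

	if ((bno = try_extend(blkno, osize, nsize)) != 0)
		return (bno);	/* grew in place; data stays put */
	bno = relocate(nsize);	/* new location; caller copies data */
	return (bno);
}
#endif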
int
ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
	register struct inode *ip;
	ufs_daddr_t lbprev;
	ufs_daddr_t bpref;
	int osize, nsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp;
	int cg, request, error;
	ufs_daddr_t bprev, bno;
	int devBlockSize = 0;

	*bpp = 0;
	fs = ip->i_fs;
#if DIAGNOSTIC
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		    "dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential\n");
#endif /* DIAGNOSTIC */
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	if (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
	VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);

	if (error = chkdq(ip, (long)btodb(nsize - osize, devBlockSize),
	    cred, 0)) {
		brelse(bp);
		return (error);
	}
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	if (bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize)) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("bad blockno");
		ip->i_blocks += btodb(nsize - osize, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
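		/*
		 * Worked example (illustrative numbers, not from the
		 * source): with fs_minfree = 5% and fs_dsize = 1000000
		 * fragments, the switch to FS_OPTTIME below fires once
		 * cs_nffree exceeds 1000000 * 5 / 200 = 25000 free
		 * fragments, i.e. half of the 5% reserve.
		 */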
		if (fs->fs_minfree < 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
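		/*
		 * Worked example (illustrative numbers): with
		 * fs_minfree = 5% and fs_dsize = 1000000 fragments, the
		 * switch back to FS_OPTSPACE below fires once cs_nffree
		 * drops below 1000000 * (5 - 2) / 100 = 30000 free
		 * fragments.
		 */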
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%x, optim = %d, fs = %s\n",
		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
	    (u_int32_t (*)())ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		ffs_blkfree(ip, bprev, (long)osize);
		if (nsize < request)
			ffs_blkfree(ip, bno + numfrags(fs, nsize),
			    (long)(request - nsize));
		ip->i_blocks += btodb(nsize - osize, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(nsize - osize, devBlockSize),
	    cred, FORCE);
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range. If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation. If unsuccessful, the allocation
 * is left unchanged. The success in doing the reallocation is returned.
 * Note that the error return is not reflected back to the user. Rather
 * the previous block allocation will be used.
 */
/* Note: This routine is unused in UBC cluster I/O */

int doreallocblks = 1;

int
ffs_reallocblks(ap)
	struct vop_reallocblks_args *ap;
{
	return (ENOSPC);
}
/*
 * Allocate an inode in the file system.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
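/*
 * Illustrative sketch (assumption, not original code): how an inode
 * preference maps to a cylinder group.  The ino_to_cg() macro used
 * below is essentially this division by the inodes-per-group count.
 */
#if 0	/* example only, never compiled */
static int
example_ino_to_cg(int ino, int ipg)	/* ipg: fs->fs_ipg */
{
	return (ino / ipg);	/* cylinder group holding this inode */
}
#endif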
int
ffs_valloc(ap)
	struct vop_valloc_args /* {
		struct vnode *a_pvp;
		int a_mode;
		struct ucred *a_cred;
		struct vnode **a_vpp;
	} */ *ap;
{
	register struct vnode *pvp = ap->a_pvp;
	register struct inode *pip;
	register struct fs *fs;
	register struct inode *ip;
	mode_t mode = ap->a_mode;
	ino_t ino, ipref;
	int cg, error;

	*ap->a_vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(fs);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode, ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, ap->a_vpp);
	if (error) {
		VOP_VFREE(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*ap->a_vpp);
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_blocks) {	/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	/*
	 * Set up a new generation number for this inode.
	 */
	if (++nextgennumber < (u_long)time.tv_sec)
		nextgennumber = time.tv_sec;
	ip->i_gen = nextgennumber;
	return (0);
noinodes:
	ffs_fserr(fs, ap->a_cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}
/*
 * Find a cylinder to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 */
static ino_t
ffs_dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return ((ino_t)(fs->fs_ipg * mincg));
}
/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
ufs_daddr_t
ffs_blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_daddr_t lbn;
	int indx;
	ufs_daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	ufs_daddr_t nextblk;
#if REV_ENDIAN_FS
	ufs_daddr_t prev = 0;
	struct vnode *vp = ITOV(ip);
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
#if REV_ENDIAN_FS
	if (rev_endian) {
		if (bap != &ip->i_db[0])
			prev = NXSwapLong(bap[indx - 1]);
		else
			prev = bap[indx - 1];
	} else prev = bap[indx - 1];
	if (indx % fs->fs_maxbpg == 0 || prev == 0)
#else /* REV_ENDIAN_FS */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0)
#endif /* REV_ENDIAN_FS */
	{
		if (lbn < NDADDR) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
#if REV_ENDIAN_FS
		if (indx == 0 || prev == 0)
#else /* REV_ENDIAN_FS */
		if (indx == 0 || bap[indx - 1] == 0)
#endif /* REV_ENDIAN_FS */
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
#if REV_ENDIAN_FS
			startcg = dtog(fs, prev) + 1;
#else /* REV_ENDIAN_FS */
			startcg = dtog(fs, bap[indx - 1]) + 1;
#endif /* REV_ENDIAN_FS */
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
#if REV_ENDIAN_FS
	if (rev_endian) {
		nextblk = prev + fs->fs_frag;
		if (indx < fs->fs_maxcontig) {
			return (nextblk);
		}
		if (bap != &ip->i_db[0])
			prev = NXSwapLong(bap[indx - fs->fs_maxcontig]);
		else
			prev = bap[indx - fs->fs_maxcontig];
		if (prev + blkstofrags(fs, fs->fs_maxcontig) != nextblk)
			return (nextblk);
	} else {
#endif /* REV_ENDIAN_FS */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx < fs->fs_maxcontig || bap[indx - fs->fs_maxcontig] +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
#if REV_ENDIAN_FS
	}
#endif /* REV_ENDIAN_FS */
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 * (frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
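		/*
		 * Worked example (illustrative numbers, not from the
		 * source): fs_rotdelay = 4 ms, fs_rps = 60 rev/sec,
		 * fs_nsect = 32 sect/rev and NSPF(fs) = 2 sect/frag
		 * give 4 * 60 * 32 / (2 * 1000) = 3 frags (integer
		 * division), which roundup() then pads to the next
		 * multiple of fs_frag.
		 */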
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}
/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
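/*
 * Illustrative sketch (assumption, not original code): the probe
 * order the quadratic rehash below walks.  Starting from cylinder
 * group cg, the step doubles each iteration, so the groups visited
 * are cg+1, cg+3, cg+7, cg+15, ... (offsets 2^k - 1, wrapped mod
 * the number of cylinder groups).
 */
#if 0	/* example only, never compiled */
static void
example_probe_order(int cg, int ncg)
{
	int i, probe = cg;

	for (i = 1; i < ncg; i *= 2) {
		probe += i;		/* cumulative offset 2^k - 1 */
		if (probe >= ncg)
			probe -= ncg;	/* wrap around the group list */
	}
}
#endif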
static u_long
ffs_hashalloc(ip, cg, pref, size, allocator)
	register struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_int32_t (*allocator)();
{
	register struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);
}
/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
static ufs_daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;
#if REV_ENDIAN_FS
	struct vnode *vp = ITOV(ip);
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);	/* number of fragments needed */
	bbase = fragnum(fs, bprev);	/* offset in a frag (it is mod fragsize) */
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	/* read corresponding cylinder group info */
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp, fs);
#endif /* REV_ENDIAN_FS */

	if (!cg_chkmagic(cgp)) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
#if REV_ENDIAN_FS
			if (rev_endian)
				byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
	return (bprev);
}
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static ufs_daddr_t
ffs_alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	ufs_daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;
#if REV_ENDIAN_FS
	struct vnode *vp = ITOV(ip);
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp, fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(fs, cgp, bpref);
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
#if REV_ENDIAN_FS
			if (rev_endian)
				byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
			brelse(bp);
			return (NULL);
		}
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return (NULL);
	}
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
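/*
 * Illustrative sketch (assumption, not original code): the rounding
 * that turns an arbitrary fragment preference into the block that
 * holds it.  The blknum() macro used below clears the low bits this
 * way when fragments-per-block is a power of two.
 */
#if 0	/* example only, never compiled */
static int
example_blknum(int fsb, int frag)	/* frag: fs->fs_frag, power of 2 */
{
	return (fsb & ~(frag - 1));	/* first fragment of the block */
}
#endif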
static ufs_daddr_t
ffs_alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	ufs_daddr_t bpref;
{
	ufs_daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one
		 * we just allocated. Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block.  A panic results if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, cg_blksfree(cgp), (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}
/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
 */
static ufs_daddr_t
ffs_clusteralloc(ip, cg, bpref, len)
	struct inode *ip;
	int cg;
	ufs_daddr_t bpref;
	int len;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int i, got, run, bno, bit, map;
	u_char *mapp;
	int32_t *lp;
#if REV_ENDIAN_FS
	struct vnode *vp = ITOV(ip);
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	if (fs->fs_maxcluster[cg] < len)
		return (NULL);
	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NOCRED, &bp))
		goto fail;
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp, fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp)) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		goto fail;
	}
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &cg_clustersum(cgp)[len];
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->fs_contigsumsize) {
		/*
		 * This is the first time looking for a cluster in this
		 * cylinder group. Update the cluster summary information
		 * to reflect the true maximum sized cluster so that
		 * future cluster allocation requests can avoid reading
		 * the cylinder group map only to find no clusters.
		 */
		lp = &cg_clustersum(cgp)[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->fs_maxcluster[cg] = i;
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		goto fail;
	}
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation. We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read). We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	else
		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (got == cgp->cg_nclusterblks) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		goto fail;
	}
	/*
	 * Allocate the cluster that we have found.
	 */
	for (i = 1; i <= len; i++)
		if (!ffs_isblock(fs, cg_blksfree(cgp), got - run + i))
			panic("ffs_clusteralloc: map mismatch");
	bno = cg * fs->fs_fpg + blkstofrags(fs, got - run + 1);
	if (dtog(fs, bno) != cg)
		panic("ffs_clusteralloc: allocated out of group");
	len = blkstofrags(fs, len);
	for (i = 0; i < len; i += fs->fs_frag)
		if ((got = ffs_alloccgblk(fs, cgp, bno + i)) != bno + i)
			panic("ffs_clusteralloc: lost block");
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
	return (bno);

fail:
	brelse(bp);
	return (0);
}
/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
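/*
 * Illustrative sketch (assumption, not original code): the
 * next-free-after-preference scan described above, reduced to a plain
 * byte array where a set bit marks an inode in use.  The real routine
 * below does this with the cg_inosused() map plus skpc().
 */
#if 0	/* example only, never compiled */
static int
example_next_free(unsigned char *map, int pref, int n)
{
	int i;

	for (i = pref; i < n; i++)
		if ((map[i / 8] & (1 << (i % 8))) == 0)
			return (i);	/* first free at or after pref */
	return (-1);			/* none free in this group */
}
#endif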
static ino_t
ffs_nodealloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	ufs_daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, start, len, loc, map, i;
#if REV_ENDIAN_FS
	struct vnode *vp = ITOV(ip);
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp, fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cg_inosused(cgp), ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %d, fs = %s\n",
			    cg, cgp->cg_irotor, fs->fs_fsmnt);
			panic("ffs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = cg_inosused(cgp)[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ffs_nodealloccg: block not in map");
	/* NOTREACHED */
gotit:
	setbit(cg_inosused(cgp), ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
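/*
 * Illustrative sketch (assumption, not original code): the reassembly
 * test implied above.  Once the freed fragments are returned to the
 * map, a block whose fs_frag bits are all set is promoted from loose
 * fragments back to one whole free block; ffs_isblock() performs this
 * check in the routine below.
 */
#if 0	/* example only, never compiled */
static int
example_block_reassembled(unsigned char *freemap, int bno, int frag)
{
	int i;

	for (i = 0; i < frag; i++)
		if ((freemap[(bno + i) / 8] & (1 << ((bno + i) % 8))) == 0)
			return (0);	/* some fragment still in use */
	return (1);			/* whole block is free again */
}
#endif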
void
ffs_blkfree(ip, bno, size)
	register struct inode *ip;
	ufs_daddr_t bno;
	long size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	ufs_daddr_t blkno;
	int i, error, cg, blk, frags, bbase;
#if REV_ENDIAN_FS
	struct vnode *vp = ITOV(ip);
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->fs_size) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		ffs_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp, fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp)) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		blkno = fragstoblks(fs, bno);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		ffs_setblock(fs, cg_blksfree(cgp), blkno);
		ffs_clusteracct(fs, cgp, blkno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		blkno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, blkno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod = 1;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
}
#if DIAGNOSTIC
/*
 * Verify allocation of a block or fragment. Returns true if block or
 * fragment is allocated, false if it is free.
 */
ffs_checkblk(ip, bno, size)
	struct inode *ip;
	ufs_daddr_t bno;
	long size;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	int i, error, frags, free;
#if REV_ENDIAN_FS
	struct vnode *vp = ITOV(ip);
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("bsize = %d, size = %d, fs = %s\n",
		    fs->fs_bsize, size, fs->fs_fsmnt);
		panic("checkblk: bad size");
	}
	if ((u_int)bno >= fs->fs_size)
		panic("checkblk: bad block %d", bno);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp, fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp)) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
	}
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		free = ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
	} else {
		frags = numfrags(fs, size);
		for (free = 0, i = 0; i < frags; i++)
			if (isset(cg_blksfree(cgp), bno + i))
				free++;
		if (free != 0 && free != frags)
			panic("checkblk: partially free fragment");
	}
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
	brelse(bp);
	return (!free);
}
#endif /* DIAGNOSTIC */
/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
int
ffs_vfree(ap)
	struct vop_vfree_args /* {
		struct vnode *a_pvp;
		ino_t a_ino;
		int a_mode;
	} */ *ap;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct inode *pip;
	ino_t ino = ap->a_ino;
	struct buf *bp;
	int error, cg;
#if REV_ENDIAN_FS
	struct vnode *vp = ap->a_pvp;
	struct mount *mp = vp->v_mount;
	int rev_endian = (mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	pip = VTOI(ap->a_pvp);
	fs = pip->i_fs;
	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
		panic("ifree: range: dev = 0x%x, ino = %d, fs = %s\n",
		    pip->i_dev, ino, fs->fs_fsmnt);
	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp, fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp)) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time.tv_sec;
	ino %= fs->fs_ipg;
	if (isclr(cg_inosused(cgp), ino)) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    pip->i_dev, ino, fs->fs_fsmnt);
		if (fs->fs_ronly == 0)
			panic("ifree: freeing free inode");
	}
	clrbit(cg_inosused(cgp), ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((ap->a_mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod = 1;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp, fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
	return (0);
}
/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static ufs_daddr_t
ffs_mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	ufs_daddr_t bpref;
	int allocsiz;
{
	ufs_daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[start],
	    (u_char *)fragtbl[fs->fs_frag],
	    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[0],
		    (u_char *)fragtbl[fs->fs_frag],
		    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("ffs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("ffs_alloccg: block not in map");
	return (-1);
}
/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
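/*
 * Worked example (illustrative, not from the source): freeing one
 * block that sits between a run of 2 free blocks and a run of 3 free
 * blocks merges them into a single run of 6.  The code below finds
 * those forward and backward runs, subtracts their old counts from
 * the cluster summary, and credits the merged size (clipped to
 * fs_contigsumsize).
 */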
void
ffs_clusteracct(fs, cgp, blkno, cnt)
	struct fs *fs;
	struct cg *cgp;
	ufs_daddr_t blkno;
	int cnt;
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp);
	sump = cg_clustersum(cgp);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if (end >= cgp->cg_nclusterblks)
		end = cgp->cg_nclusterblks;
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	sump[i] += cnt;
	if (back > 0)
		sump[back] -= cnt;
	if (forw > 0)
		sump[forw] -= cnt;
	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (*lp-- > 0)
			break;
	fs->fs_maxcluster[cgp->cg_cgx] = i;
}
/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ffs_fserr(fs, uid, cp)
	struct fs *fs;
	u_int uid;
	char *cp;
{

	log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp);
}