/*
- * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* Please see the License for the specific language governing rights and
* limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
File: VolumeAllocation.c
BlockDeallocate
Deallocate a contiguous run of allocation blocks.
+ invalidate_free_extent_cache Invalidate free extent cache for a given volume.
Internal routines:
BlockMarkFree
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/systm.h>
+#include <sys/disk.h>
#include "../../hfs.h"
#include "../../hfs_dbg.h"
static OSErr ReadBitmapBlock(
ExtendedVCB *vcb,
- UInt32 bit,
- UInt32 **buffer,
- UInt32 *blockRef);
+ u_int32_t bit,
+ u_int32_t **buffer,
+ uintptr_t *blockRef);
static OSErr ReleaseBitmapBlock(
ExtendedVCB *vcb,
- UInt32 blockRef,
+ uintptr_t blockRef,
Boolean dirty);
static OSErr BlockAllocateAny(
ExtendedVCB *vcb,
- UInt32 startingBlock,
- UInt32 endingBlock,
- UInt32 maxBlocks,
+ u_int32_t startingBlock,
+ u_int32_t endingBlock,
+ u_int32_t maxBlocks,
Boolean useMetaZone,
- UInt32 *actualStartBlock,
- UInt32 *actualNumBlocks);
+ u_int32_t *actualStartBlock,
+ u_int32_t *actualNumBlocks);
static OSErr BlockAllocateContig(
ExtendedVCB *vcb,
- UInt32 startingBlock,
- UInt32 minBlocks,
- UInt32 maxBlocks,
+ u_int32_t startingBlock,
+ u_int32_t minBlocks,
+ u_int32_t maxBlocks,
Boolean useMetaZone,
- UInt32 *actualStartBlock,
- UInt32 *actualNumBlocks);
+ u_int32_t *actualStartBlock,
+ u_int32_t *actualNumBlocks);
static OSErr BlockFindContiguous(
ExtendedVCB *vcb,
- UInt32 startingBlock,
- UInt32 endingBlock,
- UInt32 minBlocks,
- UInt32 maxBlocks,
+ u_int32_t startingBlock,
+ u_int32_t endingBlock,
+ u_int32_t minBlocks,
+ u_int32_t maxBlocks,
Boolean useMetaZone,
- UInt32 *actualStartBlock,
- UInt32 *actualNumBlocks);
+ u_int32_t *actualStartBlock,
+ u_int32_t *actualNumBlocks);
static OSErr BlockAllocateKnown(
ExtendedVCB *vcb,
- UInt32 maxBlocks,
- UInt32 *actualStartBlock,
- UInt32 *actualNumBlocks);
+ u_int32_t maxBlocks,
+ u_int32_t *actualStartBlock,
+ u_int32_t *actualNumBlocks);
+static int free_extent_cache_active(
+ ExtendedVCB *vcb);
/*
;________________________________________________________________________________
; The volume bitmap is read and updated; the volume bitmap cache may be changed.
;________________________________________________________________________________
*/
+/*
+ * Debug-only consistency check of the volume's in-memory free extent cache.
+ *
+ * For every entry in vcb->vcbFreeExt[0 .. vcbFreeExtCnt):
+ *   - panics if the entry has a zero block count;
+ *   - if check_allocated is non-zero, panics if the extent's blocks are
+ *     actually marked allocated in the bitmap (via hfs_isallocated);
+ *   - panics if two entries share the same starting block (duplicates).
+ *
+ * Compiles to a no-op unless DEBUG is defined, hence the __unused
+ * annotations on both parameters.
+ */
+static void
+sanity_check_free_ext(__unused ExtendedVCB *vcb, __unused int check_allocated)
+{
+#if DEBUG
+	u_int32_t i, j;
+
+	for(i=0; i < vcb->vcbFreeExtCnt; i++) {
+		u_int32_t start, nblocks;
+
+		start = vcb->vcbFreeExt[i].startBlock;
+		nblocks = vcb->vcbFreeExt[i].blockCount;
+
+
+		/* a zero-length extent should never be cached */
+		if (nblocks == 0) {
+			panic("hfs: %p: slot %d in the free extent array had a zero count (%d)\n", vcb, i, start);
+		}
+
+		/* cached "free" extent must not be allocated in the bitmap */
+		if (check_allocated && hfs_isallocated(vcb, start, nblocks)) {
+			panic("hfs: %p: slot %d in the free extent array is bad (%d / %d)\n",
+			vcb, i, start, nblocks);
+		}
+
+		/* no two cache slots may describe the same starting block */
+		for(j=i+1; j < vcb->vcbFreeExtCnt; j++) {
+			if (start == vcb->vcbFreeExt[j].startBlock) {
+				panic("hfs: %p: slot %d/%d are dups?! (%d / %d ; %d / %d)\n",
+				vcb, i, j, start, nblocks, vcb->vcbFreeExt[i].startBlock,
+				vcb->vcbFreeExt[i].blockCount);
+			}
+		}
+	}
+#endif
+}
+
__private_extern__
OSErr BlockAllocate (
ExtendedVCB *vcb, /* which volume to allocate space on */
- UInt32 startingBlock, /* preferred starting block, or 0 for no preference */
- UInt32 minBlocks, /* desired number of blocks to allocate */
- UInt32 maxBlocks, /* maximum number of blocks to allocate */
- Boolean forceContiguous, /* non-zero to force contiguous allocation and to force */
- /* minBlocks bytes to actually be allocated */
-
- Boolean useMetaZone,
- UInt32 *actualStartBlock, /* actual first block of allocation */
- UInt32 *actualNumBlocks) /* number of blocks actually allocated; if forceContiguous */
+ u_int32_t startingBlock, /* preferred starting block, or 0 for no preference */
+ u_int32_t minBlocks, /* desired number of blocks to allocate */
+ u_int32_t maxBlocks, /* maximum number of blocks to allocate */
+ u_int32_t flags, /* option flags */
+ u_int32_t *actualStartBlock, /* actual first block of allocation */
+ u_int32_t *actualNumBlocks) /* number of blocks actually allocated; if forceContiguous */
/* was zero, then this may represent fewer than minBlocks */
{
- UInt32 freeBlocks;
+ u_int32_t freeBlocks;
OSErr err;
Boolean updateAllocPtr = false; // true if nextAllocation needs to be updated
+ Boolean useMetaZone;
+ Boolean forceContiguous;
+
+ if (flags & HFS_ALLOC_FORCECONTIG) {
+ forceContiguous = true;
+ } else {
+ forceContiguous = false;
+ }
+
+ if (flags & HFS_ALLOC_METAZONE) {
+ useMetaZone = true;
+ } else {
+ useMetaZone = false;
+ }
//
// Initialize outputs in case we get an error
*actualNumBlocks = 0;
freeBlocks = hfs_freeblks(VCBTOHFS(vcb), 0);
- //
- // If the disk is already full, don't bother.
- //
- if (freeBlocks == 0) {
- err = dskFulErr;
- goto Exit;
- }
- if (forceContiguous && freeBlocks < minBlocks) {
- err = dskFulErr;
- goto Exit;
- }
- /*
- * Clip if necessary so we don't over-subscribe the free blocks.
+ /* Skip free block check if blocks are being allocated for relocating
+ * data during truncating a volume.
+ *
+ * During hfs_truncatefs(), the volume free block count is updated
+ * before relocating data to reflect the total number of free blocks
+ * that will exist on the volume after resize is successful. This
+ * means that we have reserved allocation blocks required for relocating
+ * the data and hence there is no need to check the free blocks.
+ * It will also prevent resize failure when the number of blocks in
+ * an extent being relocated is more than the free blocks that will
+ * exist after the volume is resized.
*/
- if (minBlocks > freeBlocks) {
- minBlocks = freeBlocks;
- }
- if (maxBlocks > freeBlocks) {
- maxBlocks = freeBlocks;
+ if ((flags & HFS_ALLOC_SKIPFREEBLKS) == 0) {
+ // If the disk is already full, don't bother.
+ if (freeBlocks == 0) {
+ err = dskFulErr;
+ goto Exit;
+ }
+ if (forceContiguous && freeBlocks < minBlocks) {
+ err = dskFulErr;
+ goto Exit;
+ }
+
+ /*
+ * Clip if necessary so we don't over-subscribe the free blocks.
+ */
+ if (minBlocks > freeBlocks) {
+ minBlocks = freeBlocks;
+ }
+ if (maxBlocks > freeBlocks) {
+ maxBlocks = freeBlocks;
+ }
}
//
// next block to allocate from.
//
if (startingBlock == 0) {
- VCB_LOCK(vcb);
- startingBlock = vcb->nextAllocation;
- VCB_UNLOCK(vcb);
+ HFS_MOUNT_LOCK(vcb, TRUE);
+ if (vcb->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
+ startingBlock = vcb->sparseAllocation;
+ } else {
+ startingBlock = vcb->nextAllocation;
+ }
+ HFS_MOUNT_UNLOCK(vcb, TRUE);
updateAllocPtr = true;
}
- if (startingBlock >= vcb->totalBlocks) {
+ if (startingBlock >= vcb->allocLimit) {
startingBlock = 0; /* overflow so start at beginning */
}
useMetaZone, actualStartBlock, actualNumBlocks);
/*
* If we allocated from a new position then
- * also update the roving allocatior.
+ * also update the roving allocator.
*/
if ((err == noErr) &&
(*actualStartBlock > startingBlock) &&
((*actualStartBlock < VCBTOHFS(vcb)->hfs_metazone_start) ||
(*actualStartBlock > VCBTOHFS(vcb)->hfs_metazone_end))) {
- vcb->nextAllocation = *actualStartBlock; /* XXX */
+
+ updateAllocPtr = true;
}
} else {
/*
*/
err = BlockAllocateKnown(vcb, maxBlocks, actualStartBlock, actualNumBlocks);
if (err == dskFulErr)
- err = BlockAllocateAny(vcb, startingBlock, vcb->totalBlocks,
+ err = BlockAllocateAny(vcb, startingBlock, vcb->allocLimit,
maxBlocks, useMetaZone, actualStartBlock,
actualNumBlocks);
if (err == dskFulErr)
actualNumBlocks);
}
- if (err == noErr) {
+Exit:
+ // if we actually allocated something then go update the
+ // various bits of state that we maintain regardless of
+ // whether there was an error (i.e. partial allocations
+ // still need to update things like the free block count).
+ //
+ if (*actualNumBlocks != 0) {
+ int i,j;
+
//
// If we used the volume's roving allocation pointer, then we need to update it.
// Adding in the length of the current allocation might reduce the next allocate
// the file is closed or its EOF changed. Leaving the allocation pointer at the
// start of the last allocation will avoid unnecessary fragmentation in this case.
//
- VCB_LOCK(vcb);
+ HFS_MOUNT_LOCK(vcb, TRUE);
+ if (vcb->vcbFreeExtCnt == 0 && vcb->hfs_freed_block_count == 0) {
+ vcb->sparseAllocation = *actualStartBlock;
+ }
+ if (*actualNumBlocks < vcb->hfs_freed_block_count) {
+ vcb->hfs_freed_block_count -= *actualNumBlocks;
+ } else {
+ vcb->hfs_freed_block_count = 0;
+ }
+
if (updateAllocPtr &&
((*actualStartBlock < VCBTOHFS(vcb)->hfs_metazone_start) ||
(*actualStartBlock > VCBTOHFS(vcb)->hfs_metazone_end))) {
- vcb->nextAllocation = *actualStartBlock;
+ HFS_UPDATE_NEXT_ALLOCATION(vcb, *actualStartBlock);
}
- //
- // Update the number of free blocks on the volume
- //
- vcb->freeBlocks -= *actualNumBlocks;
- hfs_generate_volume_notifications(VCBTOHFS(vcb));
- VCB_UNLOCK(vcb);
+ for(i=0; i < (int)vcb->vcbFreeExtCnt; i++) {
+ u_int32_t start, end;
+
+ start = vcb->vcbFreeExt[i].startBlock;
+ end = start + vcb->vcbFreeExt[i].blockCount;
+
+ if ( (*actualStartBlock >= start && *actualStartBlock < end)
+ || ((*actualStartBlock + *actualNumBlocks) > start && *actualStartBlock < start)) {
+
+ for(j=i; j < (int)vcb->vcbFreeExtCnt-1; j++) {
+ vcb->vcbFreeExt[j] = vcb->vcbFreeExt[j+1];
+ }
+
+ vcb->vcbFreeExtCnt--;
+ i--; // so we'll check the guy we just copied down...
+
+ // keep looping because we may have invalidated more
+ // than one entry in the array
+ }
+ }
+
+ /*
+ * Update the number of free blocks on the volume
+ *
+ * Skip updating the free blocks count if the blocks are
+ * being allocated to relocate data as part of hfs_truncatefs()
+ */
+ if ((flags & HFS_ALLOC_SKIPFREEBLKS) == 0) {
+ vcb->freeBlocks -= *actualNumBlocks;
+ }
MarkVCBDirty(vcb);
+ HFS_MOUNT_UNLOCK(vcb, TRUE);
+
+ sanity_check_free_ext(vcb, 1);
+
+ hfs_generate_volume_notifications(VCBTOHFS(vcb));
}
-Exit:
-
return err;
}
__private_extern__
OSErr BlockDeallocate (
ExtendedVCB *vcb, // Which volume to deallocate space on
- UInt32 firstBlock, // First block in range to deallocate
- UInt32 numBlocks) // Number of contiguous blocks to deallocate
+ u_int32_t firstBlock, // First block in range to deallocate
+ u_int32_t numBlocks, // Number of contiguous blocks to deallocate
+ u_int32_t flags)
{
OSErr err;
+ u_int32_t tempWord;
//
// If no blocks to deallocate, then exit early
//
// Update the volume's free block count, and mark the VCB as dirty.
//
- VCB_LOCK(vcb);
- vcb->freeBlocks += numBlocks;
- hfs_generate_volume_notifications(VCBTOHFS(vcb));
- if (vcb->nextAllocation == (firstBlock + numBlocks))
- vcb->nextAllocation -= numBlocks;
- VCB_UNLOCK(vcb);
+ HFS_MOUNT_LOCK(vcb, TRUE);
+
+ /*
+ * Do not update the free block count. This flag is specified
+ * when a volume is being truncated.
+ */
+ if ((flags & HFS_ALLOC_SKIPFREEBLKS) == 0) {
+ vcb->freeBlocks += numBlocks;
+ }
+
+ vcb->hfs_freed_block_count += numBlocks;
+ if (firstBlock < vcb->sparseAllocation) {
+ vcb->sparseAllocation = firstBlock;
+ }
+
+ if (vcb->nextAllocation == (firstBlock + numBlocks)) {
+ HFS_UPDATE_NEXT_ALLOCATION(vcb, (vcb->nextAllocation - numBlocks));
+ }
+
+ if (free_extent_cache_active(vcb) == 0) {
+ goto skip_cache;
+ }
+
+ tempWord = vcb->vcbFreeExtCnt;
+ // Add this free chunk to the free extent list
+ if (vcb->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
+ // Sorted by start block
+ if (tempWord == kMaxFreeExtents && vcb->vcbFreeExt[kMaxFreeExtents-1].startBlock > firstBlock)
+ --tempWord;
+ if (tempWord < kMaxFreeExtents)
+ {
+ // We're going to add this extent. Bubble any smaller extents down in the list.
+ while (tempWord && vcb->vcbFreeExt[tempWord-1].startBlock > firstBlock)
+ {
+ vcb->vcbFreeExt[tempWord] = vcb->vcbFreeExt[tempWord-1];
+ if (vcb->vcbFreeExt[tempWord].startBlock < vcb->sparseAllocation) {
+ vcb->sparseAllocation = vcb->vcbFreeExt[tempWord].startBlock;
+ }
+ --tempWord;
+ }
+ vcb->vcbFreeExt[tempWord].startBlock = firstBlock;
+ vcb->vcbFreeExt[tempWord].blockCount = numBlocks;
+
+ if (vcb->vcbFreeExtCnt < kMaxFreeExtents) {
+ ++vcb->vcbFreeExtCnt;
+ }
+ }
+ } else {
+ // Sorted by num blocks
+ if (tempWord == kMaxFreeExtents && vcb->vcbFreeExt[kMaxFreeExtents-1].blockCount < numBlocks)
+ --tempWord;
+ if (tempWord < kMaxFreeExtents)
+ {
+ // We're going to add this extent. Bubble any smaller extents down in the list.
+ while (tempWord && vcb->vcbFreeExt[tempWord-1].blockCount < numBlocks)
+ {
+ vcb->vcbFreeExt[tempWord] = vcb->vcbFreeExt[tempWord-1];
+ if (vcb->vcbFreeExt[tempWord].startBlock < vcb->sparseAllocation) {
+ vcb->sparseAllocation = vcb->vcbFreeExt[tempWord].startBlock;
+ }
+ --tempWord;
+ }
+ vcb->vcbFreeExt[tempWord].startBlock = firstBlock;
+ vcb->vcbFreeExt[tempWord].blockCount = numBlocks;
+
+ if (vcb->vcbFreeExtCnt < kMaxFreeExtents) {
+ ++vcb->vcbFreeExtCnt;
+ }
+ }
+ }
+
+skip_cache:
MarkVCBDirty(vcb);
-
+ HFS_MOUNT_UNLOCK(vcb, TRUE);
+
+ sanity_check_free_ext(vcb, 1);
+
+ hfs_generate_volume_notifications(VCBTOHFS(vcb));
Exit:
return err;
}
-UInt8 freebitcount[16] = {
+u_int8_t freebitcount[16] = {
4, 3, 3, 2, 3, 2, 2, 1, /* 0 1 2 3 4 5 6 7 */
3, 2, 2, 1, 2, 1, 1, 0, /* 8 9 A B C D E F */
};
__private_extern__
-UInt32
+u_int32_t
MetaZoneFreeBlocks(ExtendedVCB *vcb)
{
- UInt32 freeblocks;
- UInt32 *currCache;
- UInt32 blockRef;
- UInt32 bit;
- UInt32 lastbit;
+ u_int32_t freeblocks;
+ u_int32_t *currCache;
+ uintptr_t blockRef;
+ u_int32_t bit;
+ u_int32_t lastbit;
int bytesleft;
int bytesperblock;
- UInt8 byte;
- UInt8 *buffer;
+ u_int8_t byte;
+ u_int8_t *buffer;
+
blockRef = 0;
bytesleft = freeblocks = 0;
+ buffer = NULL;
bit = VCBTOHFS(vcb)->hfs_metazone_start;
if (bit == 1)
bit = 0;
if (ReadBitmapBlock(vcb, bit, &currCache, &blockRef) != 0) {
return (0);
}
- buffer = (UInt8 *)currCache;
+ buffer = (u_int8_t *)currCache;
bytesleft = bytesperblock;
}
byte = *buffer++;
* Obtain the next allocation block (bit) that's
* outside the metadata allocation zone.
*/
-static UInt32 NextBitmapBlock(
+static u_int32_t NextBitmapBlock(
ExtendedVCB *vcb,
- UInt32 bit)
+ u_int32_t bit)
{
struct hfsmount *hfsmp = VCBTOHFS(vcb);
*/
static OSErr ReadBitmapBlock(
ExtendedVCB *vcb,
- UInt32 bit,
- UInt32 **buffer,
- UInt32 *blockRef)
+ u_int32_t bit,
+ u_int32_t **buffer,
+ uintptr_t *blockRef)
{
OSErr err;
struct buf *bp = NULL;
struct vnode *vp = NULL;
- UInt32 block;
- UInt32 blockSize;
+ daddr64_t block;
+ u_int32_t blockSize;
/*
- * volume bitmap blocks are protected by the Extents B-tree lock
+ * volume bitmap blocks are protected by the allocation file lock
*/
- REQUIRE_FILE_LOCK(vcb->extentsRefNum, false);
+ REQUIRE_FILE_LOCK(vcb->hfs_allocation_vp, false);
- blockSize = (UInt32)vcb->vcbVBMIOSize;
- block = bit / (blockSize * kBitsPerByte);
+ blockSize = (u_int32_t)vcb->vcbVBMIOSize;
+ block = (daddr64_t)(bit / (blockSize * kBitsPerByte));
if (vcb->vcbSigWord == kHFSPlusSigWord) {
- vp = vcb->allocationsRefNum; /* use allocation file vnode */
+ vp = vcb->hfs_allocation_vp; /* use allocation file vnode */
} else /* hfs */ {
vp = VCBTOHFS(vcb)->hfs_devvp; /* use device I/O vnode */
block += vcb->vcbVBMSt; /* map to physical block */
}
- err = meta_bread(vp, block, blockSize, NOCRED, &bp);
+ err = (int)buf_meta_bread(vp, block, blockSize, NOCRED, &bp);
if (bp) {
if (err) {
- brelse(bp);
- *blockRef = NULL;
+ buf_brelse(bp);
+ *blockRef = 0;
*buffer = NULL;
} else {
- *blockRef = (UInt32)bp;
- *buffer = (UInt32 *)bp->b_data;
+ *blockRef = (uintptr_t)bp;
+ *buffer = (u_int32_t *)buf_dataptr(bp);
}
}
*/
static OSErr ReleaseBitmapBlock(
ExtendedVCB *vcb,
- UInt32 blockRef,
+ uintptr_t blockRef,
Boolean dirty)
{
struct buf *bp = (struct buf *)blockRef;
if (blockRef == 0) {
if (dirty)
- panic("ReleaseBitmapBlock: missing bp");
+ panic("hfs: ReleaseBitmapBlock: missing bp");
return (0);
}
struct hfsmount *hfsmp = VCBTOHFS(vcb);
if (hfsmp->jnl) {
- journal_modify_block_end(hfsmp->jnl, bp);
+ journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
} else {
- bdwrite(bp);
+ buf_bdwrite(bp);
}
} else {
- brelse(bp);
+ buf_brelse(bp);
}
}
*/
static OSErr BlockAllocateContig(
ExtendedVCB *vcb,
- UInt32 startingBlock,
- UInt32 minBlocks,
- UInt32 maxBlocks,
+ u_int32_t startingBlock,
+ u_int32_t minBlocks,
+ u_int32_t maxBlocks,
Boolean useMetaZone,
- UInt32 *actualStartBlock,
- UInt32 *actualNumBlocks)
+ u_int32_t *actualStartBlock,
+ u_int32_t *actualNumBlocks)
{
OSErr err;
* with the free extent cache, this can lead to duplicate entries
* in the cache, causing the same blocks to be allocated twice.
*/
- err = BlockFindContiguous(vcb, startingBlock, vcb->totalBlocks, minBlocks,
+ err = BlockFindContiguous(vcb, startingBlock, vcb->allocLimit, minBlocks,
maxBlocks, useMetaZone, actualStartBlock, actualNumBlocks);
if (err == dskFulErr && startingBlock != 0) {
/*
err = BlockFindContiguous(vcb, 1, startingBlock, minBlocks, maxBlocks,
useMetaZone, actualStartBlock, actualNumBlocks);
}
- if (err != noErr) goto Exit;
-
- // sanity check
- if ((*actualStartBlock + *actualNumBlocks) > vcb->totalBlocks)
- panic("BlockAllocateContig: allocation overflow on \"%s\"", vcb->vcbVN);
-
//
// Now mark those blocks allocated.
//
- err = BlockMarkAllocated(vcb, *actualStartBlock, *actualNumBlocks);
-
-Exit:
- if (err != noErr) {
- *actualStartBlock = 0;
- *actualNumBlocks = 0;
- }
+ if (err == noErr)
+ err = BlockMarkAllocated(vcb, *actualStartBlock, *actualNumBlocks);
return err;
}
*/
static OSErr BlockAllocateAny(
ExtendedVCB *vcb,
- UInt32 startingBlock,
- register UInt32 endingBlock,
- UInt32 maxBlocks,
+ u_int32_t startingBlock,
+ register u_int32_t endingBlock,
+ u_int32_t maxBlocks,
Boolean useMetaZone,
- UInt32 *actualStartBlock,
- UInt32 *actualNumBlocks)
+ u_int32_t *actualStartBlock,
+ u_int32_t *actualNumBlocks)
{
OSErr err;
- register UInt32 block; // current block number
- register UInt32 currentWord; // Pointer to current word within bitmap block
- register UInt32 bitMask; // Word with given bits already set (ready to OR in)
- register UInt32 wordsLeft; // Number of words left in this bitmap block
- UInt32 *buffer = NULL;
- UInt32 *currCache = NULL;
- UInt32 blockRef;
- UInt32 bitsPerBlock;
- UInt32 wordsPerBlock;
+ register u_int32_t block; // current block number
+ register u_int32_t currentWord; // Pointer to current word within bitmap block
+ register u_int32_t bitMask; // Word with given bits already set (ready to OR in)
+ register u_int32_t wordsLeft; // Number of words left in this bitmap block
+ u_int32_t *buffer = NULL;
+ u_int32_t *currCache = NULL;
+ uintptr_t blockRef;
+ u_int32_t bitsPerBlock;
+ u_int32_t wordsPerBlock;
Boolean dirty = false;
struct hfsmount *hfsmp = VCBTOHFS(vcb);
+ /*
+ * When we're skipping the metadata zone and the start/end
+ * range overlaps with the metadata zone then adjust the
+ * start to be outside of the metadata zone. If the range
+ * is entirely inside the metadata zone then we can deny the
+ * request (dskFulErr).
+ */
+ if (!useMetaZone && (vcb->hfs_flags & HFS_METADATA_ZONE)) {
+ if (startingBlock <= vcb->hfs_metazone_end) {
+ if (endingBlock > (vcb->hfs_metazone_end + 2))
+ startingBlock = vcb->hfs_metazone_end + 1;
+ else {
+ err = dskFulErr;
+ goto Exit;
+ }
+ }
+ }
+
// Since this routine doesn't wrap around
if (maxBlocks > (endingBlock - startingBlock)) {
maxBlocks = endingBlock - startingBlock;
}
- /*
- * Skip over metadata blocks.
- */
- if (!useMetaZone)
- startingBlock = NextBitmapBlock(vcb, startingBlock);
-
//
// Pre-read the first bitmap block
//
// Set up the current position within the block
//
{
- UInt32 wordIndexInBlock;
+ u_int32_t wordIndexInBlock;
bitsPerBlock = vcb->vcbVBMIOSize * kBitsPerByte;
wordsPerBlock = vcb->vcbVBMIOSize / kBytesPerWord;
*/
if (!useMetaZone) {
block = NextBitmapBlock(vcb, block);
- if (block >= endingBlock) {
- err = dskFulErr;
- goto Exit;
- }
}
+ if (block >= endingBlock) {
+ err = dskFulErr;
+ goto Exit;
+ }
+
err = ReadBitmapBlock(vcb, block, &currCache, &blockRef);
if (err != noErr) goto Exit;
buffer = currCache;
* Skip over metadata blocks.
*/
if (!useMetaZone) {
- UInt32 nextBlock;
+ u_int32_t nextBlock;
nextBlock = NextBitmapBlock(vcb, block);
if (nextBlock != block) {
*actualNumBlocks = block - *actualStartBlock;
// sanity check
- if ((*actualStartBlock + *actualNumBlocks) > vcb->totalBlocks)
- panic("BlockAllocateAny: allocation overflow on \"%s\"", vcb->vcbVN);
+ if ((*actualStartBlock + *actualNumBlocks) > vcb->allocLimit)
+ panic("hfs: BlockAllocateAny: allocation overflow on \"%s\"", vcb->vcbVN);
}
else {
*actualStartBlock = 0;
dskFulErr Free extent cache is empty
_______________________________________________________________________
*/
+
static OSErr BlockAllocateKnown(
ExtendedVCB *vcb,
- UInt32 maxBlocks,
- UInt32 *actualStartBlock,
- UInt32 *actualNumBlocks)
+ u_int32_t maxBlocks,
+ u_int32_t *actualStartBlock,
+ u_int32_t *actualNumBlocks)
{
- UInt32 i;
- UInt32 foundBlocks;
- UInt32 newStartBlock, newBlockCount;
-
- if (vcb->vcbFreeExtCnt == 0)
+ OSErr err;
+ u_int32_t i;
+ u_int32_t foundBlocks;
+ u_int32_t newStartBlock, newBlockCount;
+
+ HFS_MOUNT_LOCK(vcb, TRUE);
+ if (free_extent_cache_active(vcb) == 0 ||
+ vcb->vcbFreeExtCnt == 0 ||
+ vcb->vcbFreeExt[0].blockCount == 0) {
+ HFS_MOUNT_UNLOCK(vcb, TRUE);
return dskFulErr;
+ }
+ HFS_MOUNT_UNLOCK(vcb, TRUE);
// Just grab up to maxBlocks of the first (largest) free extent.
*actualStartBlock = vcb->vcbFreeExt[0].startBlock;
foundBlocks = maxBlocks;
*actualNumBlocks = foundBlocks;
+ if (vcb->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
+ // since sparse volumes keep the free extent list sorted by starting
+ // block number, the list won't get re-ordered, it may only shrink
+ //
+ vcb->vcbFreeExt[0].startBlock += foundBlocks;
+ vcb->vcbFreeExt[0].blockCount -= foundBlocks;
+ if (vcb->vcbFreeExt[0].blockCount == 0) {
+ for(i=1; i < vcb->vcbFreeExtCnt; i++) {
+ vcb->vcbFreeExt[i-1] = vcb->vcbFreeExt[i];
+ }
+ vcb->vcbFreeExtCnt--;
+ }
+
+ goto done;
+ }
+
// Adjust the start and length of that extent.
newStartBlock = vcb->vcbFreeExt[0].startBlock + foundBlocks;
newBlockCount = vcb->vcbFreeExt[0].blockCount - foundBlocks;
+
// The first extent might not be the largest anymore. Bubble up any
// (now larger) extents to the top of the list.
// If this is now the smallest known free extent, then it might be smaller than
// other extents we didn't keep track of. So, just forget about this extent.
// After the previous loop, (i-1) is the index of the extent we just allocated from.
- if (i == vcb->vcbFreeExtCnt)
+ if (newBlockCount == 0)
{
- // It's now the smallest extent, so forget about it
+ // then just reduce the number of free extents since this guy got deleted
--vcb->vcbFreeExtCnt;
}
else
vcb->vcbFreeExt[i-1].startBlock = newStartBlock;
vcb->vcbFreeExt[i-1].blockCount = newBlockCount;
}
-
+
+done:
// sanity check
- if ((*actualStartBlock + *actualNumBlocks) > vcb->totalBlocks)
- panic("BlockAllocateKnown: allocation overflow on \"%s\"", vcb->vcbVN);
+ if ((*actualStartBlock + *actualNumBlocks) > vcb->allocLimit)
+ {
+ printf ("hfs: BlockAllocateKnown() found allocation overflow on \"%s\"", vcb->vcbVN);
+ hfs_mark_volume_inconsistent(vcb);
+ *actualStartBlock = 0;
+ *actualNumBlocks = 0;
+ err = EIO;
+ }
+ else
+ {
+ //
+ // Now mark the found extent in the bitmap
+ //
+ err = BlockMarkAllocated(vcb, *actualStartBlock, *actualNumBlocks);
+ }
- //
- // Now mark the found extent in the bitmap
- //
- return BlockMarkAllocated(vcb, *actualStartBlock, *actualNumBlocks);
+ sanity_check_free_ext(vcb, 1);
+
+ return err;
}
__private_extern__
OSErr BlockMarkAllocated(
ExtendedVCB *vcb,
- UInt32 startingBlock,
- register UInt32 numBlocks)
+ u_int32_t startingBlock,
+ register u_int32_t numBlocks)
{
OSErr err;
- register UInt32 *currentWord; // Pointer to current word within bitmap block
- register UInt32 wordsLeft; // Number of words left in this bitmap block
- register UInt32 bitMask; // Word with given bits already set (ready to OR in)
- UInt32 firstBit; // Bit index within word of first bit to allocate
- UInt32 numBits; // Number of bits in word to allocate
- UInt32 *buffer = NULL;
- UInt32 blockRef;
- UInt32 bitsPerBlock;
- UInt32 wordsPerBlock;
+ register u_int32_t *currentWord; // Pointer to current word within bitmap block
+ register u_int32_t wordsLeft; // Number of words left in this bitmap block
+ register u_int32_t bitMask; // Word with given bits already set (ready to OR in)
+ u_int32_t firstBit; // Bit index within word of first bit to allocate
+ u_int32_t numBits; // Number of bits in word to allocate
+ u_int32_t *buffer = NULL;
+ uintptr_t blockRef;
+ u_int32_t bitsPerBlock;
+ u_int32_t wordsPerBlock;
// XXXdbg
struct hfsmount *hfsmp = VCBTOHFS(vcb);
// Initialize currentWord, and wordsLeft.
//
{
- UInt32 wordIndexInBlock;
+ u_int32_t wordIndexInBlock;
bitsPerBlock = vcb->vcbVBMIOSize * kBitsPerByte;
wordsPerBlock = vcb->vcbVBMIOSize / kBytesPerWord;
}
#if DEBUG_BUILD
if ((*currentWord & SWAP_BE32 (bitMask)) != 0) {
- panic("BlockMarkAllocated: blocks already allocated!");
+ panic("hfs: BlockMarkAllocated: blocks already allocated!");
}
#endif
*currentWord |= SWAP_BE32 (bitMask); // set the bits in the bitmap
}
#if DEBUG_BUILD
if (*currentWord != 0) {
- panic("BlockMarkAllocated: blocks already allocated!");
+ panic("hfs: BlockMarkAllocated: blocks already allocated!");
}
#endif
*currentWord = SWAP_BE32 (bitMask);
}
#if DEBUG_BUILD
if ((*currentWord & SWAP_BE32 (bitMask)) != 0) {
- panic("BlockMarkAllocated: blocks already allocated!");
+ panic("hfs: BlockMarkAllocated: blocks already allocated!");
}
#endif
*currentWord |= SWAP_BE32 (bitMask); // set the bits in the bitmap
__private_extern__
OSErr BlockMarkFree(
ExtendedVCB *vcb,
- UInt32 startingBlock,
- register UInt32 numBlocks)
+ u_int32_t startingBlock,
+ register u_int32_t numBlocks)
{
OSErr err;
- register UInt32 *currentWord; // Pointer to current word within bitmap block
- register UInt32 wordsLeft; // Number of words left in this bitmap block
- register UInt32 bitMask; // Word with given bits already set (ready to OR in)
- UInt32 firstBit; // Bit index within word of first bit to allocate
- UInt32 numBits; // Number of bits in word to allocate
- UInt32 *buffer = NULL;
- UInt32 blockRef;
- UInt32 bitsPerBlock;
- UInt32 wordsPerBlock;
+ register u_int32_t *currentWord; // Pointer to current word within bitmap block
+ register u_int32_t wordsLeft; // Number of words left in this bitmap block
+ register u_int32_t bitMask; // Word with given bits already set (ready to OR in)
+ u_int32_t firstBit; // Bit index within word of first bit to allocate
+ u_int32_t numBits; // Number of bits in word to allocate
+ u_int32_t *buffer = NULL;
+ uintptr_t blockRef;
+ u_int32_t bitsPerBlock;
+ u_int32_t wordsPerBlock;
// XXXdbg
struct hfsmount *hfsmp = VCBTOHFS(vcb);
+ dk_discard_t discard;
+ /*
+ * NOTE: We use vcb->totalBlocks instead of vcb->allocLimit because we
+ * need to be able to free blocks being relocated during hfs_truncatefs.
+ */
if (startingBlock + numBlocks > vcb->totalBlocks) {
- panic("hfs: block mark free: trying to free non-existent blocks (%d %d %d)\n",
- startingBlock, numBlocks, vcb->totalBlocks);
+ printf ("hfs: BlockMarkFree() trying to free non-existent blocks starting at %u (numBlock=%u) on volume %s\n", startingBlock, numBlocks, vcb->vcbVN);
+ hfs_mark_volume_inconsistent(vcb);
+ err = EIO;
+ goto Exit;
}
+ memset(&discard, 0, sizeof(dk_discard_t));
+ discard.offset = (uint64_t)startingBlock * (uint64_t)vcb->blockSize;
+ discard.length = (uint64_t)numBlocks * (uint64_t)vcb->blockSize;
+
//
// Pre-read the bitmap block containing the first word of allocation
// Initialize currentWord, and wordsLeft.
//
{
- UInt32 wordIndexInBlock;
+ u_int32_t wordIndexInBlock;
bitsPerBlock = vcb->vcbVBMIOSize * kBitsPerByte;
wordsPerBlock = vcb->vcbVBMIOSize / kBytesPerWord;
if (buffer)
(void)ReleaseBitmapBlock(vcb, blockRef, true);
+ if (err == noErr) {
+ // it doesn't matter if this fails, it's just informational anyway
+ VNOP_IOCTL(vcb->hfs_devvp, DKIOCDISCARD, (caddr_t)&discard, 0, vfs_context_kernel());
+ }
+
+
return err;
Corruption:
#if DEBUG_BUILD
- panic("BlockMarkFree: blocks not allocated!");
+ panic("hfs: BlockMarkFree: blocks not allocated!");
#else
- printf("hfs: WARNING - blocks on volume %s not allocated!\n", vcb->vcbVN);
- vcb->vcbAtrb |= kHFSVolumeInconsistentMask;
- MarkVCBDirty(vcb);
+ printf ("hfs: BlockMarkFree() trying to free unallocated blocks on volume %s\n", vcb->vcbVN);
+ hfs_mark_volume_inconsistent(vcb);
err = EIO;
goto Exit;
#endif
static OSErr BlockFindContiguous(
ExtendedVCB *vcb,
- UInt32 startingBlock,
- UInt32 endingBlock,
- UInt32 minBlocks,
- UInt32 maxBlocks,
+ u_int32_t startingBlock,
+ u_int32_t endingBlock,
+ u_int32_t minBlocks,
+ u_int32_t maxBlocks,
Boolean useMetaZone,
- UInt32 *actualStartBlock,
- UInt32 *actualNumBlocks)
+ u_int32_t *actualStartBlock,
+ u_int32_t *actualNumBlocks)
{
OSErr err;
- register UInt32 currentBlock; // Block we're currently looking at.
- UInt32 firstBlock; // First free block in current extent.
- UInt32 stopBlock; // If we get to this block, stop searching for first free block.
- UInt32 foundBlocks; // Number of contiguous free blocks in current extent.
- UInt32 *buffer = NULL;
- register UInt32 *currentWord;
- register UInt32 bitMask;
- register UInt32 wordsLeft;
- register UInt32 tempWord;
- UInt32 blockRef;
- UInt32 wordsPerBlock;
+ register u_int32_t currentBlock; // Block we're currently looking at.
+ u_int32_t firstBlock; // First free block in current extent.
+ u_int32_t stopBlock; // If we get to this block, stop searching for first free block.
+ u_int32_t foundBlocks; // Number of contiguous free blocks in current extent.
+ u_int32_t *buffer = NULL;
+ register u_int32_t *currentWord;
+ register u_int32_t bitMask;
+ register u_int32_t wordsLeft;
+ register u_int32_t tempWord;
+ uintptr_t blockRef;
+ u_int32_t wordsPerBlock;
+ u_int32_t j, updated_free_extents = 0, really_add;
+
+ /*
+ * When we're skipping the metadata zone and the start/end
+ * range overlaps with the metadata zone then adjust the
+ * start to be outside of the metadata zone. If the range
+ * is entirely inside the metadata zone then we can deny the
+ * request (dskFulErr).
+ */
+ if (!useMetaZone && (vcb->hfs_flags & HFS_METADATA_ZONE)) {
+ if (startingBlock <= vcb->hfs_metazone_end) {
+ if (endingBlock > (vcb->hfs_metazone_end + 2))
+ startingBlock = vcb->hfs_metazone_end + 1;
+ else
+ goto DiskFull;
+ }
+ }
if ((endingBlock - startingBlock) < minBlocks)
{
*/
if (!useMetaZone) {
currentBlock = NextBitmapBlock(vcb, currentBlock);
- if (currentBlock >= stopBlock)
- break;
+ if (currentBlock >= stopBlock) {
+ goto LoopExit;
+ }
}
err = ReadBitmapBlock(vcb, currentBlock, &buffer, &blockRef);
* Skip over metadata blocks.
*/
if (!useMetaZone) {
- UInt32 nextBlock;
+ u_int32_t nextBlock;
nextBlock = NextBitmapBlock(vcb, currentBlock);
if (nextBlock != currentBlock) {
- break; /* allocation gap, so stop */
+ goto LoopExit; /* allocation gap, so stop */
}
}
if (foundBlocks >= minBlocks)
break; // Found what we needed!
+ HFS_MOUNT_LOCK(vcb, TRUE);
+ if (free_extent_cache_active(vcb) == 0) {
+ HFS_MOUNT_UNLOCK(vcb, TRUE);
+ goto skip_cache;
+ }
+ HFS_MOUNT_UNLOCK(vcb, TRUE);
+
// This free chunk wasn't big enough. Try inserting it into the free extent cache in case
// the allocation wasn't forced contiguous.
+ really_add = 0;
+ for(j=0; j < vcb->vcbFreeExtCnt; j++) {
+ u_int32_t start, end;
+
+ start = vcb->vcbFreeExt[j].startBlock;
+ end = start + vcb->vcbFreeExt[j].blockCount;
+
+ if ( (firstBlock >= start && firstBlock < end)
+ || ((firstBlock + foundBlocks) > start && firstBlock < start)) {
+
+ // there's overlap with an existing entry so do not add this
+ break;
+ }
+
+ }
+
+ if (j >= vcb->vcbFreeExtCnt) {
+ really_add = 1;
+ }
+
tempWord = vcb->vcbFreeExtCnt;
- if (tempWord == kMaxFreeExtents && vcb->vcbFreeExt[kMaxFreeExtents-1].blockCount < foundBlocks)
- --tempWord;
- if (tempWord < kMaxFreeExtents)
- {
- // We're going to add this extent. Bubble any smaller extents down in the list.
- while (tempWord && vcb->vcbFreeExt[tempWord-1].blockCount < foundBlocks)
- {
- vcb->vcbFreeExt[tempWord] = vcb->vcbFreeExt[tempWord-1];
+ if (really_add && (vcb->hfs_flags & HFS_HAS_SPARSE_DEVICE)) {
+ // Sorted by starting block
+ if (tempWord == kMaxFreeExtents && vcb->vcbFreeExt[kMaxFreeExtents-1].startBlock > firstBlock)
--tempWord;
+ if (tempWord < kMaxFreeExtents)
+ {
+ // We're going to add this extent. Bubble any smaller extents down in the list.
+ while (tempWord && vcb->vcbFreeExt[tempWord-1].startBlock > firstBlock)
+ {
+ vcb->vcbFreeExt[tempWord] = vcb->vcbFreeExt[tempWord-1];
+ --tempWord;
+ }
+ vcb->vcbFreeExt[tempWord].startBlock = firstBlock;
+ vcb->vcbFreeExt[tempWord].blockCount = foundBlocks;
+
+ if (vcb->vcbFreeExtCnt < kMaxFreeExtents) {
+ ++vcb->vcbFreeExtCnt;
+ }
+ updated_free_extents = 1;
}
- vcb->vcbFreeExt[tempWord].startBlock = firstBlock;
- vcb->vcbFreeExt[tempWord].blockCount = foundBlocks;
+ } else if (really_add) {
+ // Sorted by blockCount
+ if (tempWord == kMaxFreeExtents && vcb->vcbFreeExt[kMaxFreeExtents-1].blockCount < foundBlocks)
+ --tempWord;
+ if (tempWord < kMaxFreeExtents)
+ {
+ // We're going to add this extent. Bubble any smaller extents down in the list.
+ while (tempWord && vcb->vcbFreeExt[tempWord-1].blockCount < foundBlocks)
+ {
+ vcb->vcbFreeExt[tempWord] = vcb->vcbFreeExt[tempWord-1];
+ --tempWord;
+ }
+ vcb->vcbFreeExt[tempWord].startBlock = firstBlock;
+ vcb->vcbFreeExt[tempWord].blockCount = foundBlocks;
- if (vcb->vcbFreeExtCnt < kMaxFreeExtents)
- ++vcb->vcbFreeExtCnt;
+ if (vcb->vcbFreeExtCnt < kMaxFreeExtents) {
+ ++vcb->vcbFreeExtCnt;
+ }
+ updated_free_extents = 1;
+ }
}
- } while (currentBlock < stopBlock);
+skip_cache:
+ sanity_check_free_ext(vcb, 0);
+ } while (currentBlock < stopBlock);
+LoopExit:
// Return the outputs.
if (foundBlocks < minBlocks)
err = noErr;
*actualStartBlock = firstBlock;
*actualNumBlocks = foundBlocks;
+ /*
+ * Sanity check for overflow
+ */
+ if ((firstBlock + foundBlocks) > vcb->allocLimit) {
+ panic("hfs: blk allocation overflow on \"%s\" sb:0x%08x eb:0x%08x cb:0x%08x fb:0x%08x stop:0x%08x min:0x%08x found:0x%08x",
+ vcb->vcbVN, startingBlock, endingBlock, currentBlock,
+ firstBlock, stopBlock, minBlocks, foundBlocks);
+ }
+ }
+
+ if (updated_free_extents && (vcb->hfs_flags & HFS_HAS_SPARSE_DEVICE)) {
+ int i;
+ u_int32_t min_start = vcb->totalBlocks;
+
+ // set the nextAllocation pointer to the smallest free block number
+ // we've seen so on the next mount we won't rescan unnecessarily
+ for(i=0; i < (int)vcb->vcbFreeExtCnt; i++) {
+ if (vcb->vcbFreeExt[i].startBlock < min_start) {
+ min_start = vcb->vcbFreeExt[i].startBlock;
+ }
+ }
+ if (min_start != vcb->totalBlocks) {
+ if (min_start < vcb->nextAllocation) {
+ vcb->nextAllocation = min_start;
+ }
+ if (min_start < vcb->sparseAllocation) {
+ vcb->sparseAllocation = min_start;
+ }
+ }
}
if (buffer)
(void) ReleaseBitmapBlock(vcb, blockRef, false);
+ sanity_check_free_ext(vcb, 1);
+
return err;
}
+/*
+ * Test to see if any blocks in a range are allocated.
+ *
+ * Returns 0 if every block in [startingBlock, startingBlock+numBlocks) is
+ * free in the volume bitmap, and non-zero if any block in the range is
+ * allocated.  If the initial bitmap read fails, that error code is
+ * returned instead.
+ *
+ * NOTE(review): ReadBitmapBlock/ReleaseBitmapBlock failures inside the
+ * scan loops jump to Exit and return `inuse` (which may still be 0), so
+ * those error codes are silently dropped -- confirm callers can tolerate
+ * a false "not allocated" answer on a mid-scan I/O error.
+ *
+ * The journal or allocation file lock must be held.
+ */
+__private_extern__
+int
+hfs_isallocated(struct hfsmount *hfsmp, u_int32_t startingBlock, u_int32_t numBlocks)
+{
+	u_int32_t  *currentWord;   // Pointer to current word within bitmap block
+	u_int32_t  wordsLeft;      // Number of words left in this bitmap block
+	u_int32_t  bitMask;        // Word with given bits already set (ready to test)
+	u_int32_t  firstBit;       // Bit index within word of first bit to test
+	u_int32_t  numBits;        // Number of bits in word to test
+	u_int32_t  *buffer = NULL;
+	uintptr_t  blockRef;
+	u_int32_t  bitsPerBlock;
+	u_int32_t  wordsPerBlock;
+	int  inuse = 0;            // Result: non-zero once an allocated block is seen
+	int error;
+
+	/*
+	 * Pre-read the bitmap block containing the first word of allocation
+	 */
+	error = ReadBitmapBlock(hfsmp, startingBlock, &buffer, &blockRef);
+	if (error)
+		return (error);
+
+	/*
+	 * Initialize currentWord, and wordsLeft.
+	 */
+	{
+		u_int32_t wordIndexInBlock;
+
+		bitsPerBlock  = hfsmp->vcbVBMIOSize * kBitsPerByte;
+		wordsPerBlock = hfsmp->vcbVBMIOSize / kBytesPerWord;
+
+		/* Locate startingBlock's word within this bitmap block. */
+		wordIndexInBlock = (startingBlock & (bitsPerBlock-1)) / kBitsPerWord;
+		currentWord = buffer + wordIndexInBlock;
+		wordsLeft = wordsPerBlock - wordIndexInBlock;
+	}
+
+	/*
+	 * First test any non word aligned bits.
+	 */
+	firstBit = startingBlock % kBitsPerWord;
+	if (firstBit != 0) {
+		bitMask = kAllBitsSetInWord >> firstBit;
+		numBits = kBitsPerWord - firstBit;
+		if (numBits > numBlocks) {
+			/* Range also ends inside this word; mask off bits past the end. */
+			numBits = numBlocks;
+			bitMask &= ~(kAllBitsSetInWord >> (firstBit + numBits));
+		}
+		/* The bitmap is big-endian on disk, hence SWAP_BE32 on the mask. */
+		if ((*currentWord & SWAP_BE32 (bitMask)) != 0) {
+			inuse = 1;
+			goto Exit;
+		}
+		numBlocks -= numBits;
+		++currentWord;
+		--wordsLeft;
+	}
+
+	/*
+	 * Test whole words (32 blocks) at a time.
+	 */
+	while (numBlocks >= kBitsPerWord) {
+		if (wordsLeft == 0) {
+			/* Read in the next bitmap block. */
+			startingBlock += bitsPerBlock;
+
+			buffer = NULL;
+			error = ReleaseBitmapBlock(hfsmp, blockRef, false);
+			if (error) goto Exit;
+
+			error = ReadBitmapBlock(hfsmp, startingBlock, &buffer, &blockRef);
+			if (error) goto Exit;
+
+			/* Readjust currentWord and wordsLeft. */
+			currentWord = buffer;
+			wordsLeft = wordsPerBlock;
+		}
+		/* Any set bit in a fully-covered word means a block is allocated. */
+		if (*currentWord != 0) {
+			inuse = 1;
+			goto Exit;
+		}
+		numBlocks -= kBitsPerWord;
+		++currentWord;
+		--wordsLeft;
+	}
+
+	/*
+	 * Test any remaining blocks.
+	 */
+	if (numBlocks != 0) {
+		bitMask = ~(kAllBitsSetInWord >> numBlocks);
+		if (wordsLeft == 0) {
+			/* Read in the next bitmap block */
+			startingBlock += bitsPerBlock;
+
+			buffer = NULL;
+			error = ReleaseBitmapBlock(hfsmp, blockRef, false);
+			if (error) goto Exit;
+
+			error = ReadBitmapBlock(hfsmp, startingBlock, &buffer, &blockRef);
+			if (error) goto Exit;
+			currentWord = buffer;
+			wordsLeft = wordsPerBlock;
+		}
+		if ((*currentWord & SWAP_BE32 (bitMask)) != 0) {
+			inuse = 1;
+			goto Exit;
+		}
+	}
+Exit:
+	if (buffer) {
+		(void)ReleaseBitmapBlock(hfsmp, blockRef, false);
+	}
+	return (inuse);
+}
+
+/* Invalidate free extent cache for a given volume.
+ * This cache is invalidated and disabled when a volume is being resized
+ * (via hfs_truncatefs() or hfs_extendfs()).
+ *
+ * The cache is cleared under the mount lock: every cached entry is
+ * zeroed and vcbFreeExtCnt is reset to 0.
+ *
+ * Returns: Nothing
+ */
+void invalidate_free_extent_cache(ExtendedVCB *vcb)
+{
+	u_int32_t i;
+
+	HFS_MOUNT_LOCK(vcb, TRUE);
+	/* Zero the entries as well as the count -- presumably defensive, so a
+	 * stale extent can't be read back even if the count is ignored. */
+	for (i = 0; i < vcb->vcbFreeExtCnt; i++) {
+		vcb->vcbFreeExt[i].startBlock = 0;
+		vcb->vcbFreeExt[i].blockCount = 0;
+	}
+	vcb->vcbFreeExtCnt = 0;
+	HFS_MOUNT_UNLOCK(vcb, TRUE);
+
+	return;
+}
+
+/* Check whether free extent cache is active or not.
+ * This cache is invalidated and disabled when a volume is being resized
+ * (via hfs_truncatefs() or hfs_extendfs()).
+ *
+ * This function assumes that the caller is holding the lock on
+ * the mount point.
+ *
+ * Returns: 0 if the cache is not active,
+ *          1 if the cache is active.
+ */
+static int free_extent_cache_active(ExtendedVCB *vcb)
+{
+	int retval = 1;
+
+	/* The cache is considered inactive for the entire duration of a
+	 * volume resize, as signalled by HFS_RESIZE_IN_PROGRESS. */
+	if (vcb->hfs_flags & HFS_RESIZE_IN_PROGRESS) {
+		retval = 0;
+	}
+	return retval;
+}