/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
*
* @APPLE_LICENSE_HEADER_END@
*/
-/* @(#)hfs_btreeio.c
-*
-* (c) 1998, 2000 Apple Computer, Inc. All Rights Reserved
-*
-* hfs_btreeio.c -- I/O Routines for the HFS B-tree files.
-*
-* HISTORY
-* 15-Feb-2000 Don Brady Added ClearBTNodes.
-* 16-Jul-1998 Don Brady In ExtendBtreeFile force all b-tree nodes to be contiguous on disk.
-* 4-Jun-1998 Pat Dirks Changed to do all B*-Tree writes synchronously (FORCESYNCBTREEWRITES = 1)
-* 18-apr-1998 Don Brady Call brelse on bread failure.
-* 17-Apr-1998 Pat Dirks Fixed ReleaseBTreeBlock to not call brelse when bwrite or bdwrite is called.
-* 13-apr-1998 Don Brady Add ExtendBTreeFile routine (from BTreeWrapper.c).
-* 26-mar-1998 Don Brady SetBTreeBlockSize was incorrectly excluding 512 byte blockSize.
-* 18-feb-1998 Don Brady Initially created file.
-*
-*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
+#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include "hfs.h"
+#include "hfs_cnode.h"
#include "hfs_dbg.h"
#include "hfs_endian.h"
#define FORCESYNCBTREEWRITES 0
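/* When nonzero, ReleaseBTreeBlock (below) writes every dirty b-tree node
* synchronously with VOP_BWRITE instead of using delayed writes. */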
-static OSStatus FlushAlternate( ExtendedVCB *vcb );
static int ClearBTNodes(struct vnode *vp, long blksize, off_t offset, off_t amount);
+__private_extern__
OSStatus SetBTreeBlockSize(FileReference vp, ByteCount blockSize, ItemCount minBlockCount)
{
BTreeControlBlockPtr bTreePtr;
DBG_ASSERT(vp != NULL);
- DBG_ASSERT(VTOFCB(vp) != NULL);
- DBG_ASSERT(VTOFCB(vp)->fcbBTCBPtr != NULL);
DBG_ASSERT(blockSize >= kMinNodeSize);
if (blockSize > MAXBSIZE )
return (fsBTBadNodeSize);
- DBG_TREE(("SetBlockSizeProc: blockSize=%ld for file %ld\n", blockSize, H_FILEID(VTOH(vp))));
-
- bTreePtr = (BTreeControlBlockPtr)(VTOH(vp)->fcbBTCBPtr);
+ bTreePtr = (BTreeControlBlockPtr)VTOF(vp)->fcbBTCBPtr;
bTreePtr->nodeSize = blockSize;
return (E_NONE);
}
+__private_extern__
OSStatus GetBTreeBlock(FileReference vp, UInt32 blockNum, GetBlockOptions options, BlockDescriptor *block)
{
OSStatus retval = E_NONE;
struct buf *bp = NULL;
- if (options & kGetEmptyBlock)
- bp = getblk(vp, blockNum, block->blockSize, 0, 0, BLK_META);
- else
- retval = meta_bread(vp, blockNum, block->blockSize, NOCRED, &bp);
+ if (options & kGetEmptyBlock)
+ bp = getblk(vp, blockNum, block->blockSize, 0, 0, BLK_META);
+ else
+ retval = meta_bread(vp, blockNum, block->blockSize, NOCRED, &bp);
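/* kGetEmptyBlock grabs a buffer for the node without reading it from disk
* (getblk); otherwise the node contents are read in via meta_bread. */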
DBG_ASSERT(bp != NULL);
DBG_ASSERT(bp->b_data != NULL);
(SWAP_BE16 (((BTHeaderRec *)((char *)block->buffer + 14))->nodeSize) != bp->b_bcount)) {
/* Don't swap the descriptors at all, we don't care (this block will be invalidated) */
- SWAP_BT_NODE (block, ISHFSPLUS(VTOVCB(vp)), H_FILEID(VTOH(vp)), 3);
+ SWAP_BT_NODE (block, ISHFSPLUS(VTOVCB(vp)), VTOC(vp)->c_fileid, 3);
/* The node needs swapping */
} else if (*((UInt16 *)((char *)block->buffer + (block->blockSize - sizeof (UInt16)))) == 0x0e00) {
- SWAP_BT_NODE (block, ISHFSPLUS(VTOVCB(vp)), H_FILEID(VTOH(vp)), 0);
+ SWAP_BT_NODE (block, ISHFSPLUS(VTOVCB(vp)), VTOC(vp)->c_fileid, 0);
#if 0
/* The node is not already in native byte order, hence corrupt */
} else if (*((UInt16 *)((char *)block->buffer + (block->blockSize - sizeof (UInt16)))) != 0x000e) {
}
+__private_extern__
OSStatus ReleaseBTreeBlock(FileReference vp, BlockDescPtr blockPtr, ReleaseBlockOptions options)
{
+ extern int bdwrite_internal(struct buf *, int);
OSStatus retval = E_NONE;
struct buf *bp = NULL;
bp = (struct buf *) blockPtr->blockHeader;
if (bp == NULL) {
- DBG_TREE(("ReleaseBlockProc: blockHeader is zero!\n"));
retval = -1;
goto exit;
}
brelse(bp); /* note: B-tree code will clear blockPtr->blockHeader and blockPtr->buffer */
} else {
if (options & kForceWriteBlock) {
- bp->b_flags |= B_DIRTY;
retval = VOP_BWRITE(bp);
} else if (options & kMarkBlockDirty) {
- bp->b_flags |= B_DIRTY;
#if FORCESYNCBTREEWRITES
VOP_BWRITE(bp);
#else
- if (options & kLockTransaction) {
- /*
- *
- * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
- * the buffer onto the LOCKED free list. This is necessary, otherwise
- * getnewbuf() would try to reclaim the buffers using bawrite, which
- * isn't going to work.
- *
- */
- extern int count_lock_queue __P((void));
- /* Don't hog all the buffers... */
- if (count_lock_queue() > kMaxLockedMetaBuffers)
- hfs_fsync_transaction(vp);
- bp->b_flags |= B_LOCKED;
- };
- bdwrite(bp);
+ if (options & kLockTransaction) {
+ /*
+ *
+ * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
+ * the buffer onto the LOCKED free list. This is necessary, otherwise
+ * getnewbuf() would try to reclaim the buffers using bawrite, which
+ * isn't going to work.
+ *
+ */
+ extern int count_lock_queue __P((void));
+ /* Don't hog all the buffers... */
+ if (count_lock_queue() > kMaxLockedMetaBuffers) {
+ hfs_btsync(vp, HFS_SYNCTRANS);
+ /* Rollback sync time to cause a sync on lock release... */
+ (void) BTSetLastSync(VTOF(vp), time.tv_sec - (kMaxSecsForFsync + 1));
+ }
+ bp->b_flags |= B_LOCKED;
+ }
+ /*
+ * Delay-write this block.
+ * If the maximum number of delayed buffers has been exceeded,
+ * free up some buffers and fall back to an asynchronous write.
+ */
+ if (bdwrite_internal(bp, 1) != 0) {
+ hfs_btsync(vp, 0);
+ /* Rollback sync time to cause a sync on lock release... */
+ (void) BTSetLastSync(VTOF(vp), time.tv_sec - (kMaxSecsForFsync + 1));
+ bp->b_flags &= ~B_LOCKED;
+ bawrite(bp);
+ }
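/* In the fallback path above, B_LOCKED is cleared so the buffer can be
* reclaimed normally once the asynchronous write completes. */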
#endif
} else {
}
+__private_extern__
OSStatus ExtendBTreeFile(FileReference vp, FSSize minEOF, FSSize maxEOF)
{
#pragma unused (maxEOF)
UInt64 actualBytesAdded;
UInt64 bytesToAdd;
UInt32 extendFlags;
+ u_int32_t startAllocation;
+ u_int32_t fileblocks;
BTreeInfoRec btInfo;
ExtendedVCB *vcb;
FCB *filePtr;
{
bytesToAdd = minEOF - filePtr->fcbEOF;
- if (bytesToAdd < filePtr->fcbClmpSize)
- bytesToAdd = filePtr->fcbClmpSize; //XXX why not always be a mutiple of clump size?
+ if (bytesToAdd < filePtr->ff_clumpsize)
+ bytesToAdd = filePtr->ff_clumpsize; //XXX why not always be a multiple of clump size?
}
else
{
- DBG_TREE((" ExtendBTreeFile: minEOF is smaller than current size!"));
return -1;
}
- vcb = FCBTOVCB(filePtr);
+ vcb = VTOVCB(vp);
/*
* The Extents B-tree can't have overflow extents. ExtendFileC will
* return an error if an attempt is made to extend the Extents B-tree
* when the resident extents are exhausted.
*/
/* XXX warning - this can leave the volume bitmap unprotected during ExtendFileC call */
- if(H_FILEID(filePtr) != kHFSExtentsFileID)
+ if(VTOC(vp)->c_fileid != kHFSExtentsFileID)
{
p = current_proc();
/* lock extents b-tree (also protects volume bitmap) */
(void) BTGetInformation(filePtr, 0, &btInfo);
/*
- * The b-tree code expects nodes to be contiguous. So when
+ * The b-tree code expects nodes to be contiguous. So when
* the allocation block size is less than the b-tree node
- * size, we need to force disk allocations to be contiguous.
- */
+ * size, we need to force disk allocations to be contiguous.
+ */
if (vcb->blockSize >= btInfo.nodeSize) {
extendFlags = 0;
} else {
extendFlags = kEFAllMask | kEFContigMask;
}
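/* kEFContigMask forces the new space to be physically contiguous and
* kEFAllMask makes the request all-or-nothing, so no b-tree node ends up
* spanning discontiguous allocation blocks. */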
- retval = ExtendFileC(vcb, filePtr, bytesToAdd, 0, extendFlags, &actualBytesAdded);
- if(H_FILEID(filePtr) != kHFSExtentsFileID)
- (void) hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_RELEASE, p);
+ fileblocks = filePtr->ff_blocks;
+ startAllocation = vcb->nextAllocation;
+ retval = ExtendFileC(vcb, filePtr, bytesToAdd, 0, extendFlags, &actualBytesAdded);
+ /*
+ * If a new extent was added then move the roving allocator
+ * reference forward by the current b-tree file size so
+ * there's plenty of room to grow.
+ */
+ if ((retval == 0) &&
+ (vcb->nextAllocation > startAllocation) &&
+ ((vcb->nextAllocation + fileblocks) < vcb->totalBlocks)) {
+ vcb->nextAllocation += fileblocks;
+ }
+
+ if(VTOC(vp)->c_fileid != kHFSExtentsFileID) {
+ /*
+ * Get any extents overflow b-tree changes to disk ASAP!
+ */
+ if (retval == 0) {
+ (void) BTFlushPath(VTOF(vcb->extentsRefNum));
+ (void) VOP_FSYNC(vcb->extentsRefNum, NOCRED, MNT_WAIT, p);
+ }
+ (void) hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_RELEASE, p);
+ }
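/* The extents overflow b-tree (and the volume bitmap it guards) was modified
* while holding this lock, so those changes are pushed to disk before the
* lock is released. */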
if (retval)
return (retval);
-
- if (actualBytesAdded < bytesToAdd)
- DBG_TREE((" ExtendBTreeFile: actualBytesAdded < bytesToAdd!"));
- filePtr->fcbEOF = filePtr->fcbPLen;
+ filePtr->fcbEOF = (u_int64_t)filePtr->ff_blocks * (u_int64_t)vcb->blockSize;
retval = ClearBTNodes(vp, btInfo.nodeSize, filePtr->fcbEOF - actualBytesAdded, actualBytesAdded);
if (retval)
return (retval);
/*
* Update the Alternate MDB or Alternate VolumeHeader
*/
- if ((H_FILEID(filePtr) == kHFSExtentsFileID) ||
- (H_FILEID(filePtr) == kHFSCatalogFileID) ||
- (H_FILEID(filePtr) == kHFSAttributesFileID)
+ if ((VTOC(vp)->c_fileid == kHFSExtentsFileID) ||
+ (VTOC(vp)->c_fileid == kHFSCatalogFileID) ||
+ (VTOC(vp)->c_fileid == kHFSAttributesFileID)
) {
MarkVCBDirty( vcb );
- if (vcb->vcbSigWord == kHFSPlusSigWord) {
- retval = hfs_flushvolumeheader(VCBTOHFS(vcb), 0);
- } else {
- retval = hfs_flushMDB(VCBTOHFS(vcb), 0);
- }
- if (retval == 0) {
- retval = FlushAlternate(vcb);
- }
+ retval = hfs_flushvolumeheader(VCBTOHFS(vcb), MNT_WAIT, HFS_ALTFLUSH);
}
return retval;
}
-static OSStatus
-FlushAlternate( ExtendedVCB *vcb )
-{
- struct hfsmount *hfsmp = VCBTOHFS(vcb);
- struct vnode *dev_vp = hfsmp->hfs_devvp;
- struct buf *pri_bp = NULL;
- struct buf *alt_bp = NULL;
- int sectorsize;
- u_long priIDSector;
- u_long altIDSector;
- int result;
-
- sectorsize = hfsmp->hfs_phys_block_size;
- priIDSector = (vcb->hfsPlusIOPosOffset / sectorsize) +
- HFS_PRI_SECTOR(sectorsize);
-
- altIDSector = (vcb->hfsPlusIOPosOffset / sectorsize) +
- HFS_ALT_SECTOR(sectorsize, hfsmp->hfs_phys_block_count);
-
- /* Get the main MDB/VolumeHeader block */
- result = meta_bread(dev_vp, priIDSector, sectorsize, NOCRED, &pri_bp);
- if (result)
- goto exit;
-
- /* Get the alternate MDB/VolumeHeader block */
- result = meta_bread(dev_vp, altIDSector, sectorsize, NOCRED, &alt_bp);
- if (result)
- goto exit;
-
- bcopy(pri_bp->b_data + HFS_PRI_OFFSET(sectorsize),
- alt_bp->b_data + HFS_ALT_OFFSET(sectorsize), kMDBSize);
-
- result = VOP_BWRITE(alt_bp);
- alt_bp = NULL;
-exit:
- if (alt_bp)
- brelse(alt_bp);
- if (pri_bp)
- brelse(pri_bp);
-
- return (result);
-}
-
-
/*
* Clear out (zero) new b-tree nodes on disk.
*/
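/* Each new node is zeroed through the buffer cache (getblk + bzero); B_AGE
* marks the buffers for quick reuse and the loop waits/yields every 32 blocks
* so it doesn't hog all the buffers. */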
if (bp == NULL)
continue;
bzero((char *)bp->b_data, blksize);
- bp->b_flags |= (B_DIRTY | B_AGE);
+ bp->b_flags |= B_AGE;
/* wait/yield every 32 blocks so we don't hog all the buffers */
if ((blk % 32) == 0)