#include <sys/types.h>
#include <sys/ubc.h>
#include <sys/vm.h>
-#include <dev/disk.h>
#include "hfs.h"
#include "hfs_dbg.h"
struct timezone gTimeZone = {8*60,1};
-/*************************************************************************************/
-
-/*************************************************************************************/
-/*
- * The following two routines work in tandem: StoreBufferMapping stores
- * successive buffer address -> buffer pointer mappings in a circular
- * match list, advancing the list index forward each time, while LookupBufferMapping
- * looks backwards through the list to look up a particular mapping (which is
- * typically the entry currently pointed to by gBufferAddress).
- *
- */
-static void StoreBufferMapping(caddr_t bufferAddress, struct buf *bp)
-{
- int i;
-
- DBG_ASSERT(gBufferListIndex >= 0);
- DBG_ASSERT(gBufferListIndex < BUFFERPTRLISTSIZE);
-
- simple_lock(&gBufferPtrListLock);
-
- /* We've got at most BUFFERPTRLISTSIZE tries at this... */
- for (i = BUFFERPTRLISTSIZE; i > 0; --i) {
- if (gBufferAddress[gBufferListIndex] == NULL) {
- gBufferAddress[gBufferListIndex] = bufferAddress;
- gBufferHeaderPtr[gBufferListIndex] = bp;
- break;
- }
- gBufferListIndex = (gBufferListIndex + 1) % BUFFERPTRLISTSIZE;
- };
-
- if (i == 0) {
- panic("StoreBufferMapping: couldn't find an empty slot in buffer list.");
- };
-
- DBG_ASSERT(gBufferListIndex >= 0);
- DBG_ASSERT(gBufferListIndex < BUFFERPTRLISTSIZE);
-
- simple_unlock(&gBufferPtrListLock);
-}
-
-
-/*static*/ OSErr LookupBufferMapping(caddr_t bufferAddress, struct buf **bpp, int *mappingIndexPtr)
-{
- OSErr err = E_NONE;
- int i;
- int listIndex = gBufferListIndex;
- struct buf *bp = NULL;
-
- DBG_ASSERT(gBufferListIndex >= 0);
- DBG_ASSERT(gBufferListIndex < BUFFERPTRLISTSIZE);
-
- simple_lock(&gBufferPtrListLock);
-
- /* We've got at most BUFFERPTRLISTSIZE tries at this... */
- for (i = BUFFERPTRLISTSIZE; i > 0; --i) {
- if (gBufferAddress[listIndex] == bufferAddress) {
- *mappingIndexPtr = listIndex;
- bp = gBufferHeaderPtr[listIndex];
- break;
- };
-
- listIndex = (listIndex - 1);
- if (listIndex < 0) {
- listIndex = BUFFERPTRLISTSIZE - 1;
- };
- };
-
- if (bp == NULL) {
- DEBUG_BREAK_MSG(("LookupBufferMapping: couldn't find buffer header for buffer in list.\n"));
- err = -1;
- };
-
- DBG_ASSERT(gBufferListIndex >= 0);
- DBG_ASSERT(gBufferListIndex < BUFFERPTRLISTSIZE);
-
- simple_unlock(&gBufferPtrListLock);
-
- *bpp = bp;
- return err;
-}
-
-
-static void ReleaseMappingEntry(int entryIndex) {
-
- DBG_ASSERT(gBufferListIndex >= 0);
- DBG_ASSERT(gBufferListIndex < BUFFERPTRLISTSIZE);
-
- simple_lock(&gBufferPtrListLock);
- gBufferAddress[entryIndex] = NULL;
- simple_unlock(&gBufferPtrListLock);
-};
-#if HFS_DIAGNOSTIC
-#define DBG_GETBLOCK 0
-#else
-#define DBG_GETBLOCK 0
-#endif
-
-OSErr GetBlock_glue (UInt16 options, UInt32 blockNum, Ptr *baddress, FileReference fileRefNum, ExtendedVCB * vcb)
-{
- int status;
- struct buf *bp = NULL;
- int readcount = 0;
-
-#if DBG_GETBLOCK
- DBG_IO(("Getting block %ld with options %d and a refnum of %x\n", blockNum, options, fileRefNum ));
-#endif
-
- if ((options & ~(gbReadMask | gbNoReadMask)) != 0) {
- DEBUG_BREAK_MSG(("GetBlock_glue: options = 0x%04X.\n", options));
- };
-
- *baddress = NULL;
-
- if (options & gbNoReadMask) {
- if (fileRefNum == NULL) {
- bp = getblk (VCBTOHFS(vcb)->hfs_devvp,
- IOBLKNOFORBLK(blockNum, VCBTOHFS(vcb)->hfs_phys_block_size),
- IOBYTECCNTFORBLK(blockNum, kHFSBlockSize, VCBTOHFS(vcb)->hfs_phys_block_size),
- 0,
- 0,
- BLK_META);
- } else {
- bp = getblk (fileRefNum,
- IOBLKNOFORBLK(blockNum, VCBTOHFS(vcb)->hfs_phys_block_size),
- IOBYTECCNTFORBLK(blockNum, kHFSBlockSize, VCBTOHFS(vcb)->hfs_phys_block_size),
- 0,
- 0,
- BLK_META);
- };
- status = E_NONE;
- } else {
- do {
- if (fileRefNum == NULL) {
- status = meta_bread (VCBTOHFS(vcb)->hfs_devvp,
- IOBLKNOFORBLK(blockNum, VCBTOHFS(vcb)->hfs_phys_block_size),
- IOBYTECCNTFORBLK(blockNum, kHFSBlockSize, VCBTOHFS(vcb)->hfs_phys_block_size),
- NOCRED,
- &bp);
- } else {
- status = meta_bread (fileRefNum,
- IOBLKNOFORBLK(blockNum, VCBTOHFS(vcb)->hfs_phys_block_size),
- IOBYTECCNTFORBLK(blockNum, kHFSBlockSize, VCBTOHFS(vcb)->hfs_phys_block_size),
- NOCRED,
- &bp);
- };
- if (status != E_NONE) {
- if (bp) brelse(bp);
- goto Error_Exit;
- };
-
- if (bp == NULL) {
- status = -1;
- goto Error_Exit;
- };
-
- ++readcount;
-
- if ((options & gbReadMask) && (bp->b_flags & B_CACHE)) {
- /* Rats! The block was found in the cache just when we really wanted a
- fresh copy off disk...
- */
- if (bp->b_flags & B_DIRTY) {
- DEBUG_BREAK_MSG(("GetBlock_glue: forced read for dirty block!\n"))
- };
- bp->b_flags |= B_INVAL;
- brelse(bp);
-
- /* Fall through and try again until we get a fresh copy from the disk... */
- };
- } while (((options & gbReadMask) != 0) && (readcount <= 1));
- };
-
- *baddress = bp->b_data + IOBYTEOFFSETFORBLK(bp->b_blkno, VCBTOHFS(vcb)->hfs_phys_block_size);
- StoreBufferMapping(*baddress, bp);
-
-Error_Exit: ;
- return status;
-}
-
-
-OSErr RelBlock_glue (Ptr address, UInt16 options )
-{
- int err;
- struct buf *bp;
- int mappingEntry;
-
- if (options & ~(rbTrashMask | rbDirtyMask | rbWriteMask) == 0) {
- DEBUG_BREAK_MSG(("RelBlock_glue: options = 0x%04X.\n", options));
- };
-
- if ((err = LookupBufferMapping(address, &bp, &mappingEntry))) {
- DEBUG_BREAK_MSG(("Failed to find buffer pointer for buffer in RelBlock_glue.\n"));
- } else {
- if (bp->b_flags & B_DIRTY) {
- /* The buffer was previously marked dirty (using MarkBlock_glue):
- now's the time to write it. */
- options |= rbDirtyMask;
- };
- ReleaseMappingEntry(mappingEntry);
- if (options & rbTrashMask) {
- bp->b_flags |= B_INVAL;
- brelse(bp);
- } else {
- if (options & (rbDirtyMask | rbWriteMask)) {
- bp->b_flags |= B_DIRTY;
- if (options & rbWriteMask) {
- bwrite(bp);
- } else {
- bdwrite(bp);
- }
- } else {
- brelse(bp);
- };
- };
- err = E_NONE;
- };
- return err;
-}
-
/* */
/* Creates a new vnode to hold a pseudo file like an extents tree file */
/* */
#define E_NONE 0
#define kHFSBlockSize 512
-#define kHFSBlockShift 9 /* 2^9 = 512 */
#define IOBLKNOFORBLK(STARTINGBLOCK, BLOCKSIZEINBYTES) ((daddr_t)((STARTINGBLOCK) / ((BLOCKSIZEINBYTES) >> 9)))
#define IOBLKCNTFORBLK(STARTINGBLOCK, BYTESTOTRANSFER, BLOCKSIZEINBYTES) \
	(IOBLKCNTFORBYTE((STARTINGBLOCK),(BYTESTOTRANSFER),(BLOCKSIZEINBYTES)) * (BLOCKSIZEINBYTES))
#define IOBYTEOFFSETFORBYTE(STARTINGBYTE, BLOCKSIZEINBYTES) ((STARTINGBYTE) - (IOBLKNOFORBYTE((STARTINGBYTE), (BLOCKSIZEINBYTES)) * (BLOCKSIZEINBYTES)))
+
+#define HFS_PRI_SECTOR(blksize) (1024 / (blksize))
+#define HFS_PRI_OFFSET(blksize) ((blksize) > 1024 ? 1024 : 0)
+
+#define HFS_ALT_SECTOR(blksize, blkcnt) (((blkcnt) - 1) - (512 / (blksize)))
+#define HFS_ALT_OFFSET(blksize) ((blksize) > 1024 ? (blksize) - 1024 : 0)
+
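Taken together, these macros pin the primary volume header at byte 1024 from the start of the device and the alternate header at 1024 bytes before the end, whatever the sector size. A minimal stand-alone sketch (the 1 MB device size and the main harness are illustrative, not part of the patch) evaluates them for the common sector sizes:

#include <stdio.h>

#define HFS_PRI_SECTOR(blksize)          (1024 / (blksize))
#define HFS_PRI_OFFSET(blksize)          ((blksize) > 1024 ? 1024 : 0)
#define HFS_ALT_SECTOR(blksize, blkcnt)  (((blkcnt) - 1) - (512 / (blksize)))
#define HFS_ALT_OFFSET(blksize)          ((blksize) > 1024 ? (blksize) - 1024 : 0)

int main(void)
{
	/* A hypothetical 1 MB device, expressed in each sector size. */
	unsigned long sizes[] = { 512, 1024, 2048, 4096 };
	unsigned long total = 1024 * 1024;
	int i;

	for (i = 0; i < 4; i++) {
		unsigned long bs = sizes[i];
		unsigned long cnt = total / bs;
		/* Byte position = sector * sector size + offset within that sector. */
		unsigned long pri = HFS_PRI_SECTOR(bs) * bs + HFS_PRI_OFFSET(bs);
		unsigned long alt = HFS_ALT_SECTOR(bs, cnt) * bs + HFS_ALT_OFFSET(bs);
		printf("%4lu-byte sectors: primary @ %lu, alternate @ %lu\n",
		       bs, pri, alt);   /* always 1024 and total - 1024 */
	}
	return 0;
}
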
#define MAKE_VREFNUM(x) ((int32_t)((x) & 0xffff))
/*
* This is the straight GMT conversion constant:
unsigned long baseMultiple);
OSErr hfs_MountHFSVolume(struct hfsmount *hfsmp, HFSMasterDirectoryBlock *mdb,
- u_long sectors, struct proc *p);
+ struct proc *p);
OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp,
- u_long embBlkOffset, u_long sectors, struct proc *p);
+ off_t embeddedOffset, off_t disksize, struct proc *p);
OSStatus GetInitializedVNode(struct hfsmount *hfsmp, struct vnode **tmpvnode);
int hfs_getconverter(u_int32_t encoding, hfs_to_unicode_func_t *get_unicode,
struct buf *bp = NULL;
if (options & kGetEmptyBlock)
- bp = getblk (vp,
- IOBLKNOFORBLK(blockNum, VTOHFS(vp)->hfs_phys_block_size),
- IOBYTECCNTFORBLK(blockNum, block->blockSize, VTOHFS(vp)->hfs_phys_block_size),
- 0,
- 0,
- BLK_META);
+ bp = getblk(vp, blockNum, block->blockSize, 0, 0, BLK_META);
else
- retval = meta_bread(vp,
- IOBLKNOFORBLK(blockNum, VTOHFS(vp)->hfs_phys_block_size),
- IOBYTECCNTFORBLK(blockNum, block->blockSize, VTOHFS(vp)->hfs_phys_block_size),
- NOCRED,
- &bp);
+ retval = meta_bread(vp, blockNum, block->blockSize, NOCRED, &bp);
DBG_ASSERT(bp != NULL);
DBG_ASSERT(bp->b_data != NULL);
if (retval == E_NONE) {
block->blockHeader = bp;
- block->buffer = bp->b_data + IOBYTEOFFSETFORBLK(bp->b_blkno, VTOHFS(vp)->hfs_phys_block_size);
+ block->buffer = bp->b_data;
block->blockReadFromDisk = (bp->b_flags & B_CACHE) == 0; /* not found in cache ==> came from disk */
#if BYTE_ORDER == LITTLE_ENDIAN
static OSStatus
FlushAlternate( ExtendedVCB *vcb )
{
- void *maindata;
- void *altdata;
+ struct hfsmount *hfsmp = VCBTOHFS(vcb);
+ struct vnode *dev_vp = hfsmp->hfs_devvp;
+ struct buf *pri_bp = NULL;
+ struct buf *alt_bp = NULL;
+ int sectorsize;
+ u_long priIDSector;
+ u_long altIDSector;
int result;
+ sectorsize = hfsmp->hfs_phys_block_size;
+ priIDSector = (vcb->hfsPlusIOPosOffset / sectorsize) +
+ HFS_PRI_SECTOR(sectorsize);
+
+ altIDSector = (vcb->hfsPlusIOPosOffset / sectorsize) +
+ HFS_ALT_SECTOR(sectorsize, hfsmp->hfs_phys_block_count);
+
/* Get the main MDB/VolumeHeader block */
- result = GetBlock_glue(gbDefault,
- (vcb->hfsPlusIOPosOffset / kHFSBlockSize) + kMasterDirectoryBlock,
- (Ptr *)&maindata, kNoFileReference, vcb);
- if (result) return (result);
-
- /* Get the alternate MDB/VolumeHeader block */
- result = GetBlock_glue( gbDefault, vcb->altIDSector,
- (Ptr *)&altdata, kNoFileReference, vcb );
+ result = meta_bread(dev_vp, priIDSector, sectorsize, NOCRED, &pri_bp);
+ if (result)
+ goto exit;
- if (result == 0) {
- bcopy(maindata, altdata, kMDBSize);
+ /* Get the alternate MDB/VolumeHeader block */
+ result = meta_bread(dev_vp, altIDSector, sectorsize, NOCRED, &alt_bp);
+ if (result)
+ goto exit;
- result = RelBlock_glue( (Ptr)altdata, rbWriteMask );
- }
+ bcopy(pri_bp->b_data + HFS_PRI_OFFSET(sectorsize),
+ alt_bp->b_data + HFS_ALT_OFFSET(sectorsize), kMDBSize);
- (void) RelBlock_glue( (Ptr)maindata, rbFreeMask );
+ result = VOP_BWRITE(alt_bp);
+ alt_bp = NULL;
+exit:
+ if (alt_bp)
+ brelse(alt_bp);
+ if (pri_bp)
+ brelse(pri_bp);
return (result);
}
if (((UInt16 *)((char *)bp->b_data + bp->b_bcount - 2))[0] == 0x000e) {
/* Prepare the block pointer */
block.blockHeader = bp;
- block.buffer = bp->b_data + IOBYTEOFFSETFORBLK(bp->b_blkno, VTOHFS(vp)->hfs_phys_block_size);
+ block.buffer = bp->b_data;
block.blockReadFromDisk = (bp->b_flags & B_CACHE) == 0; /* not found in cache ==> came from disk */
block.blockSize = bp->b_bcount;
static int UnpackSearchAttributeBlock(struct vnode *vp, struct attrlist *alist, searchinfospec_t *searchInfo, void *attributeBuffer);
-Boolean CheckCriteria(ExtendedVCB *vcb,
- u_long searchBits, struct attrlist *attrList,
- CatalogNodeData *cnp, CatalogKey *key,
- searchinfospec_t *searchInfo1, searchinfospec_t *searchInfo2);
+Boolean CheckCriteria( ExtendedVCB *vcb,
+ u_long searchBits,
+ struct attrlist *attrList,
+ CatalogNodeData *cnp,
+ CatalogKey *key,
+ searchinfospec_t *searchInfo1,
+ searchinfospec_t *searchInfo2,
+ Boolean lookForDup);
static int CheckAccess(CatalogNodeData *cnp, CatalogKey *key, struct proc *p);
}
//#define CompareRange(val, low, high) ((val >= low) && (val <= high))
+static Boolean IsTargetName( searchinfospec_t * searchInfoPtr, Boolean isHFSPlus );
/************************************************************************/
struct proc *p = current_proc();
CatalogNodeData myCNodeData;
CatalogNodeData * myCNodeDataPtr;
- CatalogKey * myCurrentKeyPtr;
- CatalogRecord * myCurrentDataPtr;
- CatPosition * myCatPositionPtr;
- BTScanState myBTScanState;
+ CatalogKey * myCurrentKeyPtr;
+ CatalogRecord * myCurrentDataPtr;
+ CatPosition * myCatPositionPtr;
+ BTScanState myBTScanState;
Boolean timerExpired = false;
+ Boolean doQuickExit = false;
u_long lastNodeNum = 0XFFFFFFFF;
ExtendedVCB *vcb = VTOVCB(ap->a_vp);
int err = E_NONE;
if (ap->a_options & SRCHFS_START) {
/* Starting a new search. */
+ /* make sure our meta data is synced up */
+ err = VOP_FSYNC(vcb->catalogRefNum, NOCRED, MNT_WAIT, p);
ap->a_options &= ~SRCHFS_START;
bzero( (caddr_t)myCatPositionPtr, sizeof( *myCatPositionPtr ) );
err = BTScanInitialize(catalogFCB, 0, 0, 0, kCatSearchBufferSize, &myBTScanState);
+
+#if 1 // Installer workaround
+	// Hack to get around installer problems when the installer expects search
+	// results to be in key order.  At this point the problem appears to be
+	// limited to searches for "Library".  The idea here is to fetch the
+	// "Library" at the root and return it to the caller first, then continue
+	// the search as normal, taking care not to return a duplicate hit (see CheckCriteria).
+ if ( err == E_NONE &&
+ (ap->a_searchattrs->commonattr & ATTR_CMN_NAME) != 0 &&
+ IsTargetName( &searchInfo1, isHFSPlus ) )
+ {
+ CatalogRecord rec;
+ BTreeIterator iterator;
+ FSBufferDescriptor btrec;
+ CatalogKey * keyp;
+ UInt16 reclen;
+ OSErr result;
+
+ bzero( (caddr_t)&iterator, sizeof( iterator ) );
+ keyp = (CatalogKey *) &iterator.key;
+ (void) BuildCatalogKeyUTF8(vcb, kRootDirID, "Library", kUndefinedStrLen, keyp, NULL);
+
+ btrec.bufferAddress = &rec;
+ btrec.itemCount = 1;
+ btrec.itemSize = sizeof( rec );
+
+ result = BTSearchRecord( catalogFCB, &iterator, kInvalidMRUCacheKey,
+ &btrec, &reclen, &iterator );
+ if ( result == E_NONE ) {
+ if ( isHFSPlus ) {
+				// HFS Plus volumes have CatalogRecords that map exactly to
+				// CatalogNodeData, so there is no need to copy.
+ myCNodeDataPtr = (CatalogNodeData *) &rec;
+ } else {
+ CopyCatalogNodeData( vcb, &rec, &myCNodeData );
+ myCNodeDataPtr = &myCNodeData;
+ }
+
+ if (CheckCriteria(vcb, ap->a_options, ap->a_searchattrs, myCNodeDataPtr,
+ keyp, &searchInfo1, &searchInfo2, false) &&
+ CheckAccess(myCNodeDataPtr, keyp, ap->a_uio->uio_procp)) {
+
+ result = InsertMatch(ap->a_vp, ap->a_uio, myCNodeDataPtr,
+ keyp, ap->a_returnattrs,
+ attributesBuffer, variableBuffer,
+ eachReturnBufferSize, ap->a_nummatches);
+ if (result == E_NONE && *(ap->a_nummatches) >= ap->a_maxmatches)
+ doQuickExit = true;
+ }
+ }
+ }
+#endif // Installer workaround
} else {
/* Resuming a search. */
err = BTScanInitialize(catalogFCB, myCatPositionPtr->nextNode,
(void) hfs_metafilelocking(VTOHFS(ap->a_vp), kHFSCatalogFileID, LK_RELEASE, p);
if (err)
goto ExitThisRoutine;
-
+ if ( doQuickExit )
+ goto QuickExit;
/*
* Check all the catalog btree records...
* return the attributes for matching items
}
if (CheckCriteria(vcb, ap->a_options, ap->a_searchattrs, myCNodeDataPtr,
- myCurrentKeyPtr, &searchInfo1, &searchInfo2) &&
+ myCurrentKeyPtr, &searchInfo1, &searchInfo2, true) &&
CheckAccess(myCNodeDataPtr, myCurrentKeyPtr, ap->a_uio->uio_procp)) {
err = InsertMatch(ap->a_vp, ap->a_uio, myCNodeDataPtr,
timerExpired = true;
}
}
-
+QuickExit:
/* Update catalog position */
myCatPositionPtr->writeCount = myBTScanState.btcb->writeCount;
- BTScanTerminate(&myBTScanState, &myCatPositionPtr->nextNode,
- &myCatPositionPtr->nextRecord,
- &myCatPositionPtr->recordsFound);
+ BTScanTerminate(&myBTScanState, &myCatPositionPtr->nextNode,
+ &myCatPositionPtr->nextRecord,
+ &myCatPositionPtr->recordsFound);
if ( err == E_NONE ) {
err = EAGAIN; /* signal to the user to call searchfs again */
}
Boolean
-CheckCriteria( ExtendedVCB *vcb, u_long searchBits,
- struct attrlist *attrList, CatalogNodeData *cnp, CatalogKey *key,
- searchinfospec_t *searchInfo1, searchinfospec_t *searchInfo2 )
+CheckCriteria( ExtendedVCB *vcb,
+ u_long searchBits,
+ struct attrlist *attrList,
+ CatalogNodeData *cnp,
+ CatalogKey *key,
+ searchinfospec_t *searchInfo1,
+ searchinfospec_t *searchInfo2,
+ Boolean lookForDup )
{
Boolean matched, atleastone;
Boolean isHFSPlus;
else /* full HFS name match */
matched = (FastRelString(key->hfs.nodeName, (u_char*)searchInfo1->name) == 0);
}
+
+#if 1 // Installer workaround
+ if ( lookForDup ) {
+ HFSCatalogNodeID parentID;
+ if (isHFSPlus)
+ parentID = key->hfsPlus.parentID;
+ else
+ parentID = key->hfs.parentID;
+
+ if ( matched && parentID == kRootDirID &&
+ IsTargetName( searchInfo1, isHFSPlus ) )
+ matched = false;
+ }
+#endif // Installer workaround
if ( matched == false || (searchBits & ~SRCHFS_MATCHPARTIALNAMES) == 0 )
goto TestDone; /* no match, or nothing more to compare */
}
+/* This routine was added as part of the workaround for installers that would fail */
+/* because they incorrectly assumed search results were in some kind of order.     */
+/* It is used to identify the problematic target.  At this point we only know of   */
+/* one.  This routine could be modified to handle more (I hope not).               */
+static Boolean IsTargetName( searchinfospec_t * searchInfoPtr, Boolean isHFSPlus )
+{
+ if ( searchInfoPtr->name == NULL )
+ return( false );
+
+ if (isHFSPlus) {
+ HFSUniStr255 myName = {
+ 7, /* number of unicode characters */
+ {
+ 'L','i','b','r','a','r','y'
+ }
+ };
+ if ( FastUnicodeCompare( myName.unicode, myName.length,
+ (UniChar*)searchInfoPtr->name,
+ searchInfoPtr->nameLength ) == 0 ) {
+ return( true );
+ }
+
+ } else {
+ u_char myName[32] = {
+ 0x07,'L','i','b','r','a','r','y'
+ };
+ if ( FastRelString(myName, (u_char*)searchInfoPtr->name) == 0 ) {
+ return( true );
+ }
+ }
+ return( false );
+
+} /* IsTargetName */
+
+
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/stat.h>
-#include <dev/disk.h>
#include <sys/lock.h>
#include <miscfs/specfs/specdev.h>
#include <hfs/hfs_mount.h>
int hfs_dbg_test = 0;
#endif
+
+/*
+ * These come from IOKit/storage/IOMediaBSDClient.h
+ */
+#define DKIOCGETBLOCKSIZE _IOR('d', 24, u_int32_t)
+#define DKIOCSETBLOCKSIZE _IOW('d', 24, u_int32_t)
+#define DKIOCGETBLOCKCOUNT _IOR('d', 25, u_int64_t)
+
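Since these request codes match what IOMediaBSDClient implements, the same probe can be run from user space against a raw device node. A hedged sketch (the device path, privileges, and error handling are illustrative; on Darwin the codes would normally come from <sys/disk.h>) computes the capacity the same way hfs_mountfs does below:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>

/* Same encodings as above. */
#ifndef DKIOCGETBLOCKSIZE
#define DKIOCGETBLOCKSIZE  _IOR('d', 24, u_int32_t)
#define DKIOCGETBLOCKCOUNT _IOR('d', 25, u_int64_t)
#endif

int main(int argc, char **argv)
{
	u_int32_t blksize;
	u_int64_t blkcnt;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);   /* e.g. a raw disk node */
	if (fd < 0 ||
	    ioctl(fd, DKIOCGETBLOCKSIZE, &blksize) < 0 ||
	    ioctl(fd, DKIOCGETBLOCKCOUNT, &blkcnt) < 0)
		return 1;
	/* Block count is reported at the device's current block size,
	 * which is why the mount code switches to 512 before asking. */
	printf("%u-byte blocks, %llu blocks, %llu bytes\n",
	       blksize, (unsigned long long)blkcnt,
	       (unsigned long long)blksize * blkcnt);
	close(fd);
	return 0;
}
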
/*
* HFS File System globals:
*/
register struct vnode *vp, *nvp, *devvp;
struct hfsnode *hp;
struct buf *bp;
- int size, error, i;
+ int sectorsize;
+ int error, i;
struct hfsmount *hfsmp;
struct HFSPlusVolumeHeader *vhp;
ExtendedVCB *vcb;
/*
* Re-read VolumeHeader from disk.
*/
- size = kMDBSize;
- error = bread( hfsmp->hfs_devvp,
- IOBLKNOFORBLK((vcb->hfsPlusIOPosOffset / 512) + kMasterDirectoryBlock, size),
- IOBYTECCNTFORBLK(kMasterDirectoryBlock, kMDBSize, size),
- NOCRED,
- &bp);
+ sectorsize = hfsmp->hfs_phys_block_size;
+
+ error = meta_bread(hfsmp->hfs_devvp,
+ (vcb->hfsPlusIOPosOffset / sectorsize) + HFS_PRI_SECTOR(sectorsize),
+ sectorsize, NOCRED, &bp);
if (error) {
if (bp != NULL)
brelse(bp);
return (error);
}
- vhp = (HFSPlusVolumeHeader *) ((char *)bp->b_data +
- IOBYTEOFFSETFORBLK((vcb->hfsPlusIOPosOffset / 512) + kMasterDirectoryBlock, size));
+ vhp = (HFSPlusVolumeHeader *) (bp->b_data + HFS_PRI_OFFSET(sectorsize));
if ((ValidVolumeHeader(vhp) != 0) || (vcb->blockSize != SWAP_BE32 (vhp->blockSize))) {
brelse(bp);
HFSMasterDirectoryBlock *mdbp;
int ronly;
struct ucred *cred;
- u_long diskBlks;
- u_long blksize;
+ u_int64_t disksize;
+ u_int64_t blkcnt;
+ u_int32_t blksize;
+ u_int32_t minblksize;
DBG_VFS(("hfs_mountfs: mp = 0x%lX\n", (u_long)mp));
dev = devvp->v_rdev;
if ((retval = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p)))
return (retval);
- blksize = kHFSBlockSize;
- DBG_VFS(("hfs_mountfs: size = %d (DEV_BSIZE = %d).\n", blksize, DEV_BSIZE));
+ bp = NULL;
+ hfsmp = NULL;
+ minblksize = kHFSBlockSize;
- bp = NULL;
- hfsmp = NULL;
+ /* Get the real physical block size. */
+ if (VOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, cred, p)) {
+ retval = ENXIO;
+ goto error_exit;
+ }
+ /* Switch to 512 byte sectors (temporarily) */
+ if (blksize > 512) {
+ u_int32_t size512 = 512;
+
+ if (VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, cred, p)) {
+ retval = ENXIO;
+ goto error_exit;
+ }
+ }
+ /* Get the number of 512 byte physical blocks. */
+ if (VOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, cred, p)) {
+ retval = ENXIO;
+ goto error_exit;
+ }
+ /* Compute an accurate disk size (i.e. within 512 bytes) */
+ disksize = blkcnt * (u_int64_t)512;
- /*
- * XXX SER Currently we only support 512 block size systems. This might change
- * So this is a place holder to remind us that the mdb might not be 512 aligned
- * retval = VOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, &blksize, FWRITE, cred, p);
- * if (retval) return retval;
+ /*
+	 * For large volumes (more than 2^31 sectors), use a 4K physical block size.
*/
+ if (blkcnt > (u_int64_t)0x000000007fffffff) {
+ minblksize = blksize = 4096;
+ }
- /*
- * the next three lines should probably be replaced
- * with a call to the yet unimplemented function VOP_SETBLOCKSIZE
+	/* Now switch to our preferred physical block size. */
+ if (blksize > 512) {
+ if (VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, cred, p)) {
+ retval = ENXIO;
+ goto error_exit;
+ }
+ /* Get the count of physical blocks. */
+ if (VOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, cred, p)) {
+ retval = ENXIO;
+ goto error_exit;
+ }
+ }
+
+ /*
+ * At this point:
+ * minblksize is the minimum physical block size
+	 *    blksize has our preferred physical block size
+ * blkcnt has the total number of physical blocks
*/
- retval = VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, &blksize, FWRITE, cred, p);
- if (retval) return retval;
+
devvp->v_specsize = blksize;
/* cache the IO attributes */
return (retval);
}
- DBG_VFS(("hfs_mountfs: reading MDB [block no. %d + %d bytes, size %d bytes]...\n",
- IOBLKNOFORBLK(kMasterDirectoryBlock, blksize),
- IOBYTEOFFSETFORBLK(kMasterDirectoryBlock, blksize),
- IOBYTECCNTFORBLK(kMasterDirectoryBlock, kMDBSize, blksize)));
-
- if ((retval = bread(devvp, IOBLKNOFORBLK(kMasterDirectoryBlock, blksize),
- IOBYTECCNTFORBLK(kMasterDirectoryBlock, kMDBSize, blksize), cred, &bp))) {
- goto error_exit;
- };
- mdbp = (HFSMasterDirectoryBlock*) ((char *)bp->b_data + IOBYTEOFFSETFORBLK(kMasterDirectoryBlock, blksize));
+ if ((retval = meta_bread(devvp, HFS_PRI_SECTOR(blksize), blksize, cred, &bp))) {
+ goto error_exit;
+ }
+ mdbp = (HFSMasterDirectoryBlock*) (bp->b_data + HFS_PRI_OFFSET(blksize));
- MALLOC(hfsmp, struct hfsmount *, sizeof(struct hfsmount), M_HFSMNT, M_WAITOK);
- bzero(hfsmp, sizeof(struct hfsmount));
+ MALLOC(hfsmp, struct hfsmount *, sizeof(struct hfsmount), M_HFSMNT, M_WAITOK);
+ bzero(hfsmp, sizeof(struct hfsmount));
simple_lock_init(&hfsmp->hfs_renamelock);
- DBG_VFS(("hfs_mountfs: Initializing hfsmount structure at 0x%lX...\n", (u_long)hfsmp));
/*
* Init the volume information structure
*/
hfsmp->hfs_raw_dev = devvp->v_rdev;
hfsmp->hfs_devvp = devvp;
hfsmp->hfs_phys_block_size = blksize;
-
- /* The hfs_log_block_size field is updated in the respective hfs_MountHFS[Plus]Volume routine */
- hfsmp->hfs_logBlockSize = BestBlockSizeFit(SWAP_BE32 (mdbp->drAlBlkSiz), MAXBSIZE, hfsmp->hfs_phys_block_size);
+ hfsmp->hfs_phys_block_count = blkcnt;
hfsmp->hfs_fs_ronly = ronly;
hfsmp->hfs_unknownpermissions = ((mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) != 0);
if (args) {
hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE; /* 0666: no --x by default? */
};
};
-
- /* See above comment for DKIOCGETBLOCKSIZE
- * retval = VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, &blksize, FWRITE, cred, p);
- * if (retval) return retval;
- */
-
- retval = VOP_IOCTL(devvp, DKIOCNUMBLKS, (caddr_t)&diskBlks, 0, cred, p);
- if (retval) return retval;
-
- if (SWAP_BE16 (mdbp->drSigWord) == kHFSPlusSigWord) {
- /* Enidan swap volume header in place */
- /* SWAP_HFS_PLUS_VOLUME_HEADER ((HFSPlusVolumeHeader *)bp->b_data); */
- /* mount wrapper-less HFS-Plus volume */
- (void) hfs_getconverter(0, &hfsmp->hfs_get_unicode, &hfsmp->hfs_get_hfsname);
- retval = hfs_MountHFSPlusVolume(hfsmp, (HFSPlusVolumeHeader*) bp->b_data, 0, diskBlks, p);
-
- /* Enidan un-swap volume header in place */
- /* SWAP_HFS_PLUS_VOLUME_HEADER ((HFSPlusVolumeHeader *)bp->b_data); */
-
- } else if (SWAP_BE16 (mdbp->drEmbedSigWord) == kHFSPlusSigWord) {
- u_long embBlkOffset;
- HFSPlusVolumeHeader *vhp;
-
- embBlkOffset = SWAP_BE16 (mdbp->drAlBlSt) +
- (SWAP_BE16 (mdbp->drEmbedExtent.startBlock) * (SWAP_BE32 (mdbp->drAlBlkSiz)/kHFSBlockSize));
- /* calculate virtual number of 512-byte sectors */
- diskBlks = SWAP_BE16 (mdbp->drEmbedExtent.blockCount) * (SWAP_BE32 (mdbp->drAlBlkSiz)/kHFSBlockSize);
-
- brelse(bp);
- bp = NULL; /* done with MDB, go grab Volume Header */
- mdbp = NULL;
-
- retval = bread( devvp,
- IOBLKNOFORBLK(kMasterDirectoryBlock+embBlkOffset, blksize),
- IOBYTECCNTFORBLK(kMasterDirectoryBlock+embBlkOffset, kMDBSize, blksize),
- cred,
- &bp);
- if (retval) {
+ /* Mount a standard HFS disk */
+ if ((SWAP_BE16(mdbp->drSigWord) == kHFSSigWord) &&
+ (SWAP_BE16(mdbp->drEmbedSigWord) != kHFSPlusSigWord)) {
+ if (devvp == rootvp) {
+ retval = EINVAL; /* Cannot root from HFS standard disks */
goto error_exit;
- };
- vhp = (HFSPlusVolumeHeader*) ((char *)bp->b_data + IOBYTEOFFSETFORBLK(kMasterDirectoryBlock, blksize));
-
- /* Enidan swap volume header in place */
- /* SWAP_HFS_PLUS_VOLUME_HEADER (vhp); */
-
- /* mount embedded HFS Plus volume */
- (void) hfs_getconverter(0, &hfsmp->hfs_get_unicode, &hfsmp->hfs_get_hfsname);
- retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embBlkOffset, diskBlks, p);
-
- /* Enidan un-swap volume header in place */
- /* SWAP_HFS_PLUS_VOLUME_HEADER (vhp); */
-
- } else if (devvp != rootvp) {
+ }
+ /* HFS disks can only use 512 byte physical blocks */
+ if (blksize > kHFSBlockSize) {
+ blksize = kHFSBlockSize;
+ if (VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, cred, p)) {
+ retval = ENXIO;
+ goto error_exit;
+ }
+ if (VOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, cred, p)) {
+ retval = ENXIO;
+ goto error_exit;
+ }
+ /* XXX do we need to call vfs_init_io_attributes again ? */
+ devvp->v_specsize = blksize;
+ hfsmp->hfs_phys_block_size = blksize;
+ hfsmp->hfs_phys_block_count = blkcnt;
+ }
if (args) {
hfsmp->hfs_encoding = args->hfs_encoding;
HFSTOVCB(hfsmp)->volumeNameEncodingHint = args->hfs_encoding;
-
/* establish the timezone */
gTimeZone = args->hfs_timezone;
}
retval = hfs_getconverter(hfsmp->hfs_encoding, &hfsmp->hfs_get_unicode, &hfsmp->hfs_get_hfsname);
- if (retval) goto error_exit;
+ if (retval)
+ goto error_exit;
- /* mount HFS volume */
- retval = hfs_MountHFSVolume( hfsmp, mdbp, diskBlks, p);
-
+ retval = hfs_MountHFSVolume(hfsmp, mdbp, p);
if (retval)
(void) hfs_relconverter(hfsmp->hfs_encoding);
- } else {
- /* sorry, we cannot root from HFS */
- retval = EINVAL;
- }
+ } else /* Mount an HFS Plus disk */ {
+ HFSPlusVolumeHeader *vhp;
+ off_t embeddedOffset;
+
+ /* Get the embedded Volume Header */
+ if (SWAP_BE16(mdbp->drEmbedSigWord) == kHFSPlusSigWord) {
+ embeddedOffset = SWAP_BE16(mdbp->drAlBlSt) * kHFSBlockSize;
+ embeddedOffset += (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.startBlock) *
+ (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz);
+
+ disksize = (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.blockCount) *
+ (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz);
+
+ hfsmp->hfs_phys_block_count = disksize / blksize;
+
+ brelse(bp);
+ bp = NULL;
+ mdbp = NULL;
+
+ /*
+ * If the embedded volume doesn't start on a block
+ * boundary, then switch the device to a 512-byte
+ * block size so everything will line up on a block
+ * boundary.
+ */
+ if ((embeddedOffset % blksize) != 0) {
+ printf("HFS Mount: embedded volume offset not"
+ " a multiple of physical block size (%d);"
+ " switching to 512\n", blksize);
+ blksize = 512;
+ if (VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE,
+ (caddr_t)&blksize, FWRITE, cred, p)) {
+ retval = ENXIO;
+ goto error_exit;
+ }
+ if (VOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT,
+ (caddr_t)&blkcnt, 0, cred, p)) {
+ retval = ENXIO;
+ goto error_exit;
+ }
+ /* XXX do we need to call vfs_init_io_attributes again? */
+ devvp->v_specsize = blksize;
+ /* Note: relative block count adjustment */
+ hfsmp->hfs_phys_block_count *=
+ hfsmp->hfs_phys_block_size / blksize;
+ hfsmp->hfs_phys_block_size = blksize;
+ }
+
+ retval = meta_bread(devvp, (embeddedOffset / blksize) + HFS_PRI_SECTOR(blksize),
+ blksize, cred, &bp);
+ if (retval)
+ goto error_exit;
+ vhp = (HFSPlusVolumeHeader*) (bp->b_data + HFS_PRI_OFFSET(blksize));
+
+ } else /* pure HFS+ */ {
+ embeddedOffset = 0;
+ vhp = (HFSPlusVolumeHeader*) mdbp;
+ }
+
+ (void) hfs_getconverter(0, &hfsmp->hfs_get_unicode, &hfsmp->hfs_get_hfsname);
+
+ retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p);
+ /*
+ * If the backend didn't like our physical blocksize
+	 * then retry with a physical blocksize of 512.
+ */
+ if ((retval == ENXIO) && (blksize > 512) && (blksize != minblksize)) {
+ printf("HFS Mount: could not use physical block size "
+ "(%d) switching to 512\n", blksize);
+ blksize = 512;
+ if (VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, cred, p)) {
+ retval = ENXIO;
+ goto error_exit;
+ }
+ if (VOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, cred, p)) {
+ retval = ENXIO;
+ goto error_exit;
+ }
+ /* XXX do we need to call vfs_init_io_attributes again ? */
+ devvp->v_specsize = blksize;
+ /* Note: relative block count adjustment (in case this is an embedded volume). */
+ hfsmp->hfs_phys_block_count *= hfsmp->hfs_phys_block_size / blksize;
+ hfsmp->hfs_phys_block_size = blksize;
+
+ /* Try again with a smaller block size... */
+ retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p);
+ }
+ if (retval)
+ (void) hfs_relconverter(0);
+ }
if ( retval ) {
goto error_exit;
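For the embedded case handled above, the wrapper's MDB alone determines where the HFS Plus volume lives: drAlBlSt counts 512-byte (kHFSBlockSize) sectors before the allocation blocks, and the embed extent counts allocation blocks. A stand-alone sketch with made-up MDB values (all four numbers are illustrative, already byte-swapped) reproduces the arithmetic:

#include <stdio.h>

int main(void)
{
	/* Illustrative wrapper (HFS) MDB fields. */
	unsigned short drAlBlSt = 28;          /* first alloc block, in 512-byte sectors */
	unsigned long  drAlBlkSiz = 4096;      /* allocation block size, in bytes */
	unsigned short embStartBlock = 2;      /* drEmbedExtent.startBlock */
	unsigned short embBlockCount = 2500;   /* drEmbedExtent.blockCount */

	/* Same math as hfs_mountfs: sectors before the allocation blocks,
	 * plus whole allocation blocks up to the embedded volume. */
	unsigned long long embeddedOffset =
	    (unsigned long long)drAlBlSt * 512 +
	    (unsigned long long)embStartBlock * drAlBlkSiz;
	unsigned long long disksize =
	    (unsigned long long)embBlockCount * drAlBlkSiz;

	printf("embedded volume: offset %llu bytes, size %llu bytes\n",
	       embeddedOffset, disksize);   /* 22528 and 10240000 */
	return 0;
}
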
FCB *fcb;
HFSPlusVolumeHeader *volumeHeader;
int retval;
- int size = sizeof(HFSPlusVolumeHeader);
struct buf *bp;
int i;
+ int sectorsize;
+ int priIDSector;
if (vcb->vcbSigWord != kHFSPlusSigWord)
return EINVAL;
- retval = bread(hfsmp->hfs_devvp, IOBLKNOFORBLK((vcb->hfsPlusIOPosOffset / 512) + kMasterDirectoryBlock, size),
- IOBYTECCNTFORBLK(kMasterDirectoryBlock, kMDBSize, size), NOCRED, &bp);
+ sectorsize = hfsmp->hfs_phys_block_size;
+ priIDSector = (vcb->hfsPlusIOPosOffset / sectorsize) +
+ HFS_PRI_SECTOR(sectorsize);
+
+ retval = meta_bread(hfsmp->hfs_devvp, priIDSector, sectorsize, NOCRED, &bp);
if (retval) {
DBG_VFS((" hfs_flushvolumeheader bread return error! (%d)\n", retval));
if (bp) brelse(bp);
DBG_ASSERT(bp->b_data != NULL);
	DBG_ASSERT(bp->b_bcount == sectorsize);
- volumeHeader = (HFSPlusVolumeHeader *)((char *)bp->b_data +
- IOBYTEOFFSETFORBLK((vcb->hfsPlusIOPosOffset / 512) + kMasterDirectoryBlock, size));
+ volumeHeader = (HFSPlusVolumeHeader *)((char *)bp->b_data + HFS_PRI_OFFSET(sectorsize));
/*
* For embedded HFS+ volumes, update create date if it changed
struct buf *bp2;
HFSMasterDirectoryBlock *mdb;
- retval = bread(hfsmp->hfs_devvp, IOBLKNOFORBLK(kMasterDirectoryBlock, kMDBSize),
- IOBYTECCNTFORBLK(kMasterDirectoryBlock, kMDBSize, kMDBSize), NOCRED, &bp2);
+ retval = meta_bread(hfsmp->hfs_devvp, HFS_PRI_SECTOR(sectorsize), sectorsize, NOCRED, &bp2);
if (retval != E_NONE) {
if (bp2) brelse(bp2);
} else {
- mdb = (HFSMasterDirectoryBlock *)((char *)bp2->b_data + IOBYTEOFFSETFORBLK(kMasterDirectoryBlock, kMDBSize));
+ mdb = (HFSMasterDirectoryBlock *)(bp2->b_data + HFS_PRI_OFFSET(sectorsize));
if ( SWAP_BE32 (mdb->drCrDate) != vcb->localCreateDate )
{
//*******************************************************************************
OSErr hfs_MountHFSVolume(struct hfsmount *hfsmp, HFSMasterDirectoryBlock *mdb,
- u_long sectors, struct proc *p)
+ struct proc *p)
{
ExtendedVCB *vcb = HFSTOVCB(hfsmp);
struct vnode *tmpvnode;
if (err || (utf8chars == 0))
(void) mac_roman_to_utf8(mdb->drVN, NAME_MAX, &utf8chars, vcb->vcbVN);
- vcb->altIDSector = sectors - 2;
-
// Initialize our dirID/nodePtr cache associated with this volume.
err = InitMRUCache( sizeof(UInt32), kDefaultNumMRUCacheBlocks, &(vcb->hintCachePtr) );
ReturnIfError( err );
//*******************************************************************************
OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp,
- u_long embBlkOffset, u_long sectors, struct proc *p)
+ off_t embeddedOffset, off_t disksize, struct proc *p)
{
register ExtendedVCB *vcb;
HFSPlusForkData *fdp;
	/* don't mount a writable volume if it's dirty; it must be cleaned by fsck_hfs */
if (hfsmp->hfs_fs_ronly == 0 && (SWAP_BE32 (vhp->attributes) & kHFSVolumeUnmountedMask) == 0)
return (EINVAL);
+
+ /* Make sure we can live with the physical block size. */
+ if ((disksize & (hfsmp->hfs_phys_block_size - 1)) ||
+ (embeddedOffset & (hfsmp->hfs_phys_block_size - 1)) ||
+ (SWAP_BE32(vhp->blockSize) < hfsmp->hfs_phys_block_size)) {
+ return (ENXIO);
+ }
+
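The masking test above works because hfs_phys_block_size is always a power of two, so size - 1 is a mask of the in-block offset bits. A quick stand-alone check (the sample values are illustrative):

#include <stdio.h>

/* Alignment test used above: valid only when blksize is a power of two,
 * because only then is (blksize - 1) a mask of the low-order offset bits. */
static int misaligned(unsigned long long value, unsigned long blksize)
{
	return (value & (blksize - 1)) != 0;
}

int main(void)
{
	printf("%d\n", misaligned(10240000, 4096));  /* 0: exact multiple of 4096 */
	printf("%d\n", misaligned(10240001, 4096));  /* 1: not aligned */
	return 0;
}
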
/*
* The VolumeHeader seems OK: transfer info from it into VCB
* Note - the VCB starts out clear (all zeros)
vcb->checkedDate = SWAP_BE32 (vhp->checkedDate);
vcb->encodingsBitmap = SWAP_BE64 (vhp->encodingsBitmap);
- vcb->hfsPlusIOPosOffset = embBlkOffset * 512;
-
- vcb->altIDSector = embBlkOffset + sectors - 2;
+ vcb->hfsPlusIOPosOffset = embeddedOffset;
vcb->localCreateDate = SWAP_BE32 (vhp->createDate); /* in local time, not GMT! */
case fileBoundsErr: /* -1309 */
return EINVAL; /* +22 */
+ case fsBTBadNodeSize:
+ return ENXIO;
default:
DBG_UTILS(("Unmapped MacOS error: %d\n", err));
return EIO; /* +5 */
goto out;
/* Write the link to disk */
- bp = getblk(vp, 0, roundup((int)hp->fcbEOF, kHFSBlockSize), 0, 0, BLK_META);
+ bp = getblk(vp, 0, roundup((int)hp->fcbEOF, VTOHFS(vp)->hfs_phys_block_size), 0, 0, BLK_META);
bzero(bp->b_data, bp->b_bufsize);
bcopy(ap->a_target, bp->b_data, len);
bp->b_flags |= B_DIRTY;
if (H_ISBIGLINK(hp))
MALLOC(hp->h_symlinkptr, char *, hp->fcbEOF, M_TEMP, M_WAITOK);
- retval = meta_bread(vp, 0, roundup((int)hp->fcbEOF, kHFSBlockSize), ap->a_cred, &bp);
+ retval = meta_bread(vp, 0, roundup((int)hp->fcbEOF, VTOHFS(vp)->hfs_phys_block_size),
+ ap->a_cred, &bp);
if (retval) {
if (bp)
brelse(bp);
/////////////////////////// Read Header Node ////////////////////////////////
nodeRec.buffer = nil; // so we can call ReleaseNode
- nodeRec.blockSize = kMinNodeSize;
btreePtr->fileRefNum = GetFileRefNumFromFCB(filePtr);
filePtr->fcbBTCBPtr = (Ptr) btreePtr; // attach btree cb to file
+ /* The minimum node size is the physical block size */
+ nodeRec.blockSize = VTOHFS(btreePtr->fileRefNum)->hfs_phys_block_size;
+
REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false);
// it is now safe to call M_ExitOnError (err)
- err = setBlockSizeProc (btreePtr->fileRefNum, kMinNodeSize, 1);
+ err = setBlockSizeProc (btreePtr->fileRefNum, nodeRec.blockSize, 1);
M_ExitOnError (err);
	// set kBadClose attribute bit, and UpdateNode
- // if nodeSize is 512 then we don't need to release, just CheckNode
+ // if nodeSize matches then we don't need to release, just CheckNode
+
+ /* b-tree node size must be at least as big as the physical block size */
+ if (btreePtr->nodeSize < nodeRec.blockSize) {
+ err = fsBTBadNodeSize;
+ goto ErrorExit;
+ }
- if ( btreePtr->nodeSize == kMinNodeSize )
+ if ( btreePtr->nodeSize == nodeRec.blockSize )
{
err = CheckNode (btreePtr, nodeRec.buffer);
if (err)
kDataForkType = 0,
kResourceForkType = 0xFF,
- kPreviousRecord = -1,
-
- kSectorSize = 512 // Size of a physical sector
+ kPreviousRecord = -1
};
void HFSToHFSPlusExtents(
FCB *fcb, // FCB of file
size_t numberOfBytes, // number of contiguous bytes desired
off_t offset, // starting offset within file (in bytes)
- daddr_t *startSector, // first 512-byte sector (NOT an allocation block)
+ daddr_t *startSector, // first sector (NOT an allocation block)
size_t *availableBytes) // number of contiguous bytes (up to numberOfBytes)
{
OSErr err;
UInt32 allocBlockSize; // Size of the volume's allocation block
+ UInt32 sectorSize;
HFSPlusExtentKey foundKey;
HFSPlusExtentRecord foundData;
UInt32 foundIndex;
off_t tmpOff;
allocBlockSize = vcb->blockSize;
+ sectorSize = VCBTOHFS(vcb)->hfs_phys_block_size;
err = SearchExtentFile(vcb, fcb, offset, &foundKey, foundData, &foundIndex, &hint, &nextFABN);
if (err == noErr) {
dataEnd = fcb->fcbPLen; // Yes, so only map up to PEOF
// Compute the number of sectors in an allocation block
- sectorsPerBlock = allocBlockSize / kSectorSize; // sectors per allocation block
+ sectorsPerBlock = allocBlockSize / sectorSize; // sectors per allocation block
//
// Compute the absolute sector number that contains the offset of the given file
//
- temp = (daddr_t)((offset - (off_t)((off_t)(firstFABN) * (off_t)(allocBlockSize)))/kSectorSize); // offset in sectors from start of the extent
+
+ // offset in sectors from start of the extent
+ temp = (daddr_t)((offset - (off_t)((off_t)(firstFABN) * (off_t)(allocBlockSize)))/sectorSize);
temp += startBlock * sectorsPerBlock; // offset in sectors from start of allocation block space
- if (vcb->vcbSigWord == kHFSPlusSigWord)
- temp += vcb->hfsPlusIOPosOffset/512; /* offset inside wrapper */
- else
- temp += vcb->vcbAlBlSt; /* offset in sectors from start of volume */
+ if (vcb->vcbSigWord == kHFSPlusSigWord)
+ temp += vcb->hfsPlusIOPosOffset / sectorSize; /* offset inside wrapper */
+ else
+ temp += vcb->vcbAlBlSt; /* offset in sectors from start of volume */
// Return the desired sector for file position "offset"
*startSector = temp;
return err;
}
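The mapping above reduces to three additions once everything is expressed in sectors. A stand-alone sketch with sample numbers (all inputs illustrative) mirrors the HFS Plus path:

#include <stdio.h>

int main(void)
{
	/* Illustrative inputs. */
	unsigned long sectorSize = 512;           /* hfs_phys_block_size */
	unsigned long allocBlockSize = 4096;      /* vcb->blockSize */
	unsigned long hfsPlusIOPosOffset = 22528; /* embedded volume offset, bytes */
	unsigned long firstFABN = 10;             /* first file alloc block of extent */
	unsigned long startBlock = 1234;          /* extent start, in alloc blocks */
	unsigned long long offset = 43000;        /* file position being mapped */

	unsigned long sectorsPerBlock = allocBlockSize / sectorSize;

	/* 1. offset in sectors from the start of the extent */
	unsigned long long sector =
	    (offset - (unsigned long long)firstFABN * allocBlockSize) / sectorSize;
	/* 2. plus the extent's start within allocation-block space */
	sector += (unsigned long long)startBlock * sectorsPerBlock;
	/* 3. plus the volume's own offset inside the wrapper (HFS Plus case) */
	sector += hfsPlusIOPosOffset / sectorSize;

	printf("file offset %llu -> device sector %llu\n", offset, sector);
	return 0;
}
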
-extern OSErr LookupBufferMapping(caddr_t bufferAddress, struct buf **bpp, int *mappingIndexPtr);
-
/*
_______________________________________________________________________
UInt16 dataSize,
UInt32 * newHint);
-/* Prototypes for C->Asm glue*/
-EXTERN_API_C( OSErr )
-GetBlock_glue (UInt16 flags,
- UInt32 nodeNumber,
- Ptr * nodeBuffer,
- FileReference refNum,
- ExtendedVCB * vcb);
-
-EXTERN_API_C( OSErr )
-RelBlock_glue (Ptr nodeBuffer,
- UInt16 flags);
/* Prototypes for exported routines in VolumeAllocation.c*/
EXTERN_API_C( OSErr )
#include <net/netisr.h>
#include <net/if_types.h>
+#include <machine/machine_routines.h>
#define DBG_LAYER_BEG DLILDBG_CODE(DBG_DLIL_STATIC, 0)
#define DBG_LAYER_END DLILDBG_CODE(DBG_DLIL_STATIC, 2)
* bind it to the master cpu.
*/
stack_privilege(self);
+ ml_thread_policy(current_thread(), MACHINE_GROUP, (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
/* The dlil thread is always funneled */
thread_funnel_set(network_flock, TRUE);
int s;
CLR(bp->b_flags, B_INVAL | B_NOCACHE);
- SET(bp->b_flags, B_DELWRI);
+ if (!ISSET(bp->b_flags, B_DELWRI)) {
+ extern int nbdwrite;
+ SET(bp->b_flags, B_DELWRI);
+ nbdwrite++;
+ }
FSDBG(261, bp->b_validoff, bp->b_validend,
bp->b_bufsize, bp->b_bcount);
/*
np->n_size = vap->va_size;
} else
np->n_size = vap->va_size;
- if (dontshrink && UBCISVALID(vp) &&
- np->n_size < ubc_getsize(vp)) {
+ if (!UBCINFOEXISTS(vp) ||
+		    (dontshrink && np->n_size < ubc_getsize(vp))) {
vap->va_size = np->n_size = orig_size;
np->n_attrstamp = 0;
} else
s = splbio();
CLR(bp->b_flags, (B_READ|B_DONE|B_ERROR|B_DELWRI));
+ if (ISSET(oldflags, B_DELWRI)) {
+ extern int nbdwrite;
+ nbdwrite--;
+ }
if (ISSET(oldflags, (B_ASYNC|B_DELWRI))) {
reassignbuf(bp, vp);
/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
extern int niobuf; /* The number of IO buffer headers for cluster IO */
int blaundrycnt;
+/* zone allocated buffer headers */
+static zone_t buf_hdr_zone;
+static int buf_hdr_count;
+
#if TRACE
struct proc *traceproc;
int tracewhich, tracebuf[TRCSIZ];
/* Definitions for the buffer stats. */
struct bufstats bufstats;
+/* Number of delayed write buffers */
+int nbdwrite = 0;
+
/*
* Insq/Remq for the buffer hash lists.
*/
TAILQ_HEAD(ioqueue, buf) iobufqueue;
TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
-int needbuffer;
-int need_iobuffer;
+static int needbuffer;
+static int need_iobuffer;
/*
* Insq/Remq for the buffer free lists.
simple_lock_data_t bufhashlist_slock; /* lock on buffer hash list */
+/* number of per vnode, "in flight" buffer writes */
+#define BUFWRITE_THROTTLE 9
+
/*
* Time in seconds before a buffer on a list is
* considered as a stale buffer
sync = !ISSET(bp->b_flags, B_ASYNC);
wasdelayed = ISSET(bp->b_flags, B_DELWRI);
CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
+ if (wasdelayed)
+ nbdwrite--;
if (!sync) {
/*
p->p_stats->p_ru.ru_oublock++; /* XXX */
}
- trace(TR_BWRITE, pack(vp, bp->b_bcount), bp->b_lblkno);
+ trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno);
/* Initiate disk write. Make sure the appropriate party is charged. */
SET(bp->b_flags, B_WRITEINPROG);
* written in the order that the writes are requested.
*
* Described in Leffler, et al. (pp. 208-213).
+ *
+ * Note:	With the ability to allocate additional buffer
+ * headers, we can get into a situation where "too" many
+ * bdwrite()s can create buffers faster than the disks can
+ * service them.  Doing a bawrite() in cases where we have
+ * "too many" outstanding bdwrite()s avoids that.
*/
void
bdwrite(bp)
struct buf *bp;
{
struct proc *p = current_proc();
- kern_return_t kret;
- upl_t upl;
- upl_page_info_t *pl;
+ struct vnode *vp = bp->b_vp;
/*
* If the block hasn't been seen before:
SET(bp->b_flags, B_DELWRI);
if (p && p->p_stats)
p->p_stats->p_ru.ru_oublock++; /* XXX */
-
- reassignbuf(bp, bp->b_vp);
+ nbdwrite ++;
+ reassignbuf(bp, vp);
}
return;
}
+ /*
+	 * If the vnode has "too many" write operations in progress,
+	 * wait for them to finish their IO.
+ */
+ while (vp->v_numoutput >= BUFWRITE_THROTTLE) {
+ vp->v_flag |= VTHROTTLED;
+ (void)tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "bdwrite", 0);
+ }
+
+ /*
+ * If we have too many delayed write buffers,
+ * more than we can "safely" handle, just fall back to
+ * doing the async write
+ */
+ if (nbdwrite < 0)
+ panic("bdwrite: Negative nbdwrite");
+
+ if (nbdwrite > ((nbuf/4)*3)) {
+ bawrite(bp);
+ return;
+ }
+
/* Otherwise, the "write" is done, so mark and release the buffer. */
SET(bp->b_flags, B_DONE);
brelse(bp);
/*
* Asynchronous block write; just an asynchronous bwrite().
+ *
+ * Note:	With the ability to allocate additional buffer
+ * headers, we can get into a situation where "too" many
+ * bawrite()s can create buffers faster than the disks can
+ * service them.
+ * We limit the number of "in flight" writes a vnode can have to
+ * avoid this.
*/
void
bawrite(bp)
struct buf *bp;
{
+ struct vnode *vp = bp->b_vp;
+
+ if (vp) {
+ /*
+	 * If the vnode has "too many" write operations in progress,
+	 * wait for them to finish their IO.
+ */
+ while (vp->v_numoutput >= BUFWRITE_THROTTLE) {
+ vp->v_flag |= VTHROTTLED;
+ (void)tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "bawrite", 0);
+ }
+ }
SET(bp->b_flags, B_ASYNC);
VOP_BWRITE(bp);
*/
if (bp->b_vp)
brelvp(bp);
- CLR(bp->b_flags, B_DELWRI);
+ if (ISSET(bp->b_flags, B_DELWRI)) {
+ CLR(bp->b_flags, B_DELWRI);
+ nbdwrite--;
+ }
if (bp->b_bufsize <= 0)
whichq = BQ_EMPTY; /* no data */
else
};
#endif /* ZALLOC_METADATA */
-zone_t buf_hdr_zone;
-int buf_hdr_count;
-
/*
* Initialize the meta data zones
*/
{
int s;
struct ucred *cred;
+ int hdralloc = 0;
s = splbio();
/* Buffer is no longer on free lists. */
SET(bp->b_flags, B_BUSY);
+ /* Check whether the buffer header was "allocated" */
+ if (ISSET(bp->b_flags, B_HDRALLOC))
+ hdralloc = 1;
+
if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
panic("bcleanbuf: le_prev is deadbeef");
bp->b_bufsize = 0;
bp->b_data = 0;
bp->b_flags = B_BUSY;
+ if (hdralloc)
+ SET(bp->b_flags, B_HDRALLOC);
bp->b_dev = NODEV;
bp->b_blkno = bp->b_lblkno = 0;
bp->b_iodone = 0;
struct buf *bp;
{
boolean_t funnel_state;
- int s;
+ struct vnode *vp;
funnel_state = thread_funnel_set(kernel_flock, TRUE);
if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW))
vwakeup(bp); /* wake up reader */
+ /* Wakeup the throttled write operations as needed */
+ vp = bp->b_vp;
+ if (vp
+ && (vp->v_flag & VTHROTTLED)
+ && (vp->v_numoutput <= (BUFWRITE_THROTTLE / 3))) {
+ vp->v_flag &= ~VTHROTTLED;
+ wakeup((caddr_t)&vp->v_numoutput);
+ }
+
if (ISSET(bp->b_flags, B_CALL)) { /* if necessary, call out */
CLR(bp->b_flags, B_CALL); /* but note callout done */
(*bp->b_iodone)(bp);
* can be outstanding on a single vnode
* before we issue a synchronous write
*/
-#define ASYNC_THROTTLE 6
+#define ASYNC_THROTTLE 9
static int
cluster_iodone(bp)
if (ubc_page_op(vp, f_offset, 0, 0, 0) == KERN_SUCCESS) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
(int)f_offset, 0, 0, 0, 0);
- return(0);
+ return(1);
}
if (size > (MAX_UPL_TRANSFER * PAGE_SIZE))
size = MAX_UPL_TRANSFER * PAGE_SIZE;
daddr_t r_lblkno;
off_t f_offset;
int size_of_prefetch;
- int max_iosize;
int max_pages;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START,
return;
}
- vfs_io_attributes(vp, B_READ, &max_iosize, &max_pages);
-
- if ((max_iosize / PAGE_SIZE) < max_pages)
- max_pages = max_iosize / PAGE_SIZE;
- if (max_pages > MAX_UPL_TRANSFER)
- max_pages = MAX_UPL_TRANSFER;
+ max_pages = MAX_UPL_TRANSFER;
vp->v_ralen = vp->v_ralen ? min(max_pages, vp->v_ralen << 1) : 1;
#include <IOKit/IOTimerEventSource.h>
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/pwr_mgt/RootDomain.h>
-#include <IOKit/pwr_mgt/IOPM.h>
+#include <IOKit/pwr_mgt/IOPMPrivate.h>
#include <IOKit/IOMessage.h>
#include "RootDomainUserClient.h"
#include "IOKit/pwr_mgt/IOPowerConnection.h"
-extern "C" {
-extern void kprintf(const char *, ...);
-}
+extern "C" void kprintf(const char *, ...);
extern const IORegistryPlane * gIOPowerPlane;
void PMreceiveCmd ( OSObject *, void *, void *, void *, void * );
static void sleepTimerExpired(thread_call_param_t);
+static void wakeupClamshellTimerExpired ( thread_call_param_t us);
#define number_of_power_states 5
bool IOPMrootDomain::start ( IOService * nub )
{
+ OSDictionary *tmpDict;
+
super::start(nub);
gRootDomain = this;
canSleep = true;
wrangler = NULL;
sleepASAP = false;
+ ignoringClamshellDuringWakeup = false;
+ tmpDict = OSDictionary::withCapacity(1);
+ setProperty(kRootDomainSupportedFeatures, tmpDict);
+ tmpDict->release();
+
pm_vars->PMworkloop = IOWorkLoop::workLoop(); // make the workloop
pm_vars->commandQueue = IOCommandQueue::commandQueue(this, PMreceiveCmd); // make a command queue
if (! pm_vars->commandQueue ||
return IOPMNoErr;
}
extraSleepTimer = thread_call_allocate((thread_call_func_t)sleepTimerExpired, (thread_call_param_t) this);
-
+ clamshellWakeupIgnore = thread_call_allocate((thread_call_func_t)wakeupClamshellTimerExpired, (thread_call_param_t) this);
diskSyncCalloutEntry = thread_call_allocate(&disk_sync_callout, (thread_call_param_t) this);
patriarch = new IORootParent; // create our parent
((IOPMrootDomain *)us)->handleSleepTimerExpiration();
}
+
+static void wakeupClamshellTimerExpired ( thread_call_param_t us)
+{
+ ((IOPMrootDomain *)us)->stopIgnoringClamshellEventsDuringWakeup();
+}
+
// **********************************************************************************
// handleSleepTimerExpiration
}
+void IOPMrootDomain::stopIgnoringClamshellEventsDuringWakeup(void)
+{
+ OSObject * state;
+
+ // Allow clamshell-induced sleep now
+ ignoringClamshellDuringWakeup = false;
+
+ if ((state = getProperty(kAppleClamshellStateKey)))
+ publishResource(kAppleClamshellStateKey, state);
+}
+
//*********************************************************************************
// setAggressiveness
//
IOReturn IOPMrootDomain::setAggressiveness ( unsigned long type, unsigned long newLevel )
{
+
if ( systemBooting && (type == kPMMinutesToDim) ) {
systemBooting = false; // when the login window launches, this method gets called -- system booting is done.
IOLog("Root power domain receiving initial preferences\n");
clock_interval_to_deadline(30, kSecondScale, &deadline); // stay awake for at least 30 seconds
thread_call_enter_delayed(extraSleepTimer, deadline);
idleSleepPending = true; // this gets turned off when we sleep again
+
+ // Ignore closed clamshell during wakeup and for a few seconds
+ // after wakeup is complete
+ ignoringClamshellDuringWakeup = true;
+
gSleepOrShutdownPending = 0; // sleep transition complete
patriarch->wakeSystem(); // get us some power
IOLog("System Wake\n");
systemWake(); // tell the tree we're waking
-
+
+ // Allow drivers to request extra processing time before clamshell
+ // sleep if kIOREMSleepEnabledKey is present.
+ // Ignore clamshell events for at least 5 seconds
+ if(getProperty(kIOREMSleepEnabledKey)) {
+        // clamshellWakeupIgnore callout clears the ignoringClamshellDuringWakeup bit
+ clock_interval_to_deadline(5, kSecondScale, &deadline);
+ if(clamshellWakeupIgnore) thread_call_enter_delayed(clamshellWakeupIgnore, deadline);
+ } else ignoringClamshellDuringWakeup = false;
+
propertyPtr = OSDynamicCast(OSNumber,getProperty("WakeEvent"));
if ( propertyPtr ) { // find out what woke us
theProperty = propertyPtr->unsigned16BitValue();
}
+// **********************************************************************************
+// publishFeature
+//
+// Adds a new feature to the supported features dictionary
+//
+//
+// **********************************************************************************
+void IOPMrootDomain::publishFeature( const char * feature )
+{
+ OSDictionary *features = (OSDictionary *)getProperty(kRootDomainSupportedFeatures);
+
+ features->setObject(feature, kOSBooleanTrue);
+}
+
+
// **********************************************************************************
// newUserClient
//
IOReturn IOPMrootDomain::receivePowerNotification (UInt32 msg)
{
+ if (msg & kIOPMSetDesktopMode) {
+ desktopMode = (0 != (msg & kIOPMSetValue));
+ msg &= ~(kIOPMSetDesktopMode | kIOPMSetValue);
+ }
+ if (msg & kIOPMSetACAdaptorConnected) {
+ acAdaptorConnect = (0 != (msg & kIOPMSetValue));
+ msg &= ~(kIOPMSetACAdaptorConnected | kIOPMSetValue);
+ }
+ if (msg & kIOPMEnableClamshell) {
+ ignoringClamshell = false;
+ }
+ if (msg & kIOPMDisableClamshell) {
+ ignoringClamshell = true;
+ }
+
+ if (msg & kIOPMProcessorSpeedChange) {
+ IOService *pmu = waitForService(serviceMatching("ApplePMU"));
+ pmu->callPlatformFunction("prepareForSleep", false, 0, 0, 0, 0);
+ pm_vars->thePlatform->sleepKernel();
+ pmu->callPlatformFunction("recoverFromSleep", false, 0, 0, 0, 0);
+ }
+
if (msg & kIOPMSleepNow) {
(void) sleepSystem ();
}
(void) sleepSystem ();
}
- if (msg & kIOPMClamshellClosed) {
- if ( ! ignoringClamshell ) {
- (void) sleepSystem ();
- }
+ if (msg & kIOPMOverTemp) {
+        IOLog("Power Management received emergency overtemp signal. Going to sleep.\n");
+ (void) sleepSystem ();
}
- if (msg & kIOPMEnableClamshell) {
- ignoringClamshell = false;
- }
- if (msg & kIOPMDisableClamshell) {
- ignoringClamshell = true;
+ if (msg & kIOPMClamshellClosed) {
+ if ( !ignoringClamshell && !ignoringClamshellDuringWakeup
+ && (!desktopMode || !acAdaptorConnect) ) {
+
+ (void) sleepSystem ();
+ }
}
if (msg & kIOPMPowerButton) { // toggle state of sleep/wake
void IOPMrootDomain::restoreUserSpinDownTimeout ( void )
{
- if(systemBooting) {
- IOLog("!!!!! WARNING !!!!! restoreUserSpinDownTimeout called too early\n");
- }
- //IOLog("restoreUserSpinDownTimeout, user_spindown = %u\n", user_spindown);
-
super::setAggressiveness((unsigned long)kPMMinutesToSpinDown,(unsigned long)user_spindown);
}
case kIOMessageSystemWillPowerOff:
case kIOMessageSystemWillRestart:
+ ret = kIOReturnUnsupported;
break;
default:
#include <IOKit/IODeviceTreeSupport.h>
#include <IOKit/nvram/IONVRAMController.h>
+
+#define kIODTNVRAMOFPartitionName "common"
+#define kIODTNVRAMXPRAMPartitionName "APL,MacOS75"
+#define kIODTNVRAMFreePartitionName "wwwwwwwwwwww"
+
enum {
kIODTNVRAMImageSize = 0x2000,
kIODTNVRAMXPRAMSize = 0x0100,
UInt8 *_ofImage;
bool _ofImageDirty;
OSDictionary *_ofDict;
+ OSDictionary *_nvramPartitionOffsets;
+ OSDictionary *_nvramPartitionLengths;
UInt32 _xpramPartitionOffset;
UInt32 _xpramPartitionSize;
UInt8 *_xpramImage;
virtual IOReturn writeNVRAMProperty(IORegistryEntry *entry,
const OSSymbol *name,
OSData *value);
+
+ virtual OSDictionary *getNVRAMPartitions(void);
+
+ virtual IOReturn readNVRAMPartition(const OSSymbol *partitionID,
+ IOByteCount offset, UInt8 *buffer,
+ IOByteCount length);
+
+ virtual IOReturn writeNVRAMPartition(const OSSymbol *partitionID,
+ IOByteCount offset, UInt8 *buffer,
+ IOByteCount length);
};
#endif /* !_IOKIT_IONVRAM_H */
/* virtual */ IOReturn readXPRAM(IOByteCount offset, UInt8 * buffer,
IOByteCount length);
+
/* virtual */ IOReturn writeXPRAM(IOByteCount offset, UInt8 * buffer,
IOByteCount length);
IORegistryEntry * entry,
const OSSymbol * name, OSData * value );
+ // This returns a dictionary describing all the NVRAM partitions.
+ // The keys will be the partitionIDs of the form "0x52,nvram".
+ // The values will be OSNumbers of the partition's byte count.
+ /* virtual */ OSDictionary *getNVRAMPartitions(void);
+
+ /* virtual */ IOReturn readNVRAMPartition(const OSSymbol * partitionID,
+ IOByteCount offset, UInt8 * buffer,
+ IOByteCount length);
+
+ /* virtual */ IOReturn writeNVRAMPartition(const OSSymbol * partitionID,
+ IOByteCount offset, UInt8 * buffer,
+ IOByteCount length);
OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 0);
OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 1);
kIOPMClamshellClosed = (1<<4), // clamshell was closed
kIOPMPowerEmergency = (1<<5), // battery dangerously low
kIOPMDisableClamshell = (1<<6), // do not sleep on clamshell closure
- kIOPMEnableClamshell = (1<<7) // sleep on clamshell closure
+ kIOPMEnableClamshell = (1<<7), // sleep on clamshell closure
+ kIOPMProcessorSpeedChange = (1<<8), // change the processor speed
+ kIOPMOverTemp = (1<<9) // system dangerously hot
};
// Return codes
kPMMinutesToSpinDown,
kPMMinutesToSleep,
kPMEthernetWakeOnLANSettings,
+ kPMSetProcessorSpeed
};
#define kMaxType kPMEthernetWakeOnLANSettings
+#define kAppleClamshellStateKey "AppleClamshellState"
+#define kIOREMSleepEnabledKey "REMSleepEnabled"
#define kIOBatteryInfoKey "IOBatteryInfo"
#define kIOBatteryCurrentChargeKey "Current"
#define kIOBatteryFlagsKey "Flags"
#define kIOBatteryVoltageKey "Voltage"
#define kIOBatteryAmperageKey "Amperage"
+
enum {
kIOBatteryInstalled = (1 << 2),
kIOBatteryCharge = (1 << 1),
kIOBatteryChargerConnect = (1 << 0)
};
+// These flags are deprecated. Use the version with the kIOPM prefix below.
+enum {
+ kACInstalled = kIOBatteryChargerConnect,
+ kBatteryCharging = kIOBatteryCharge,
+ kBatteryInstalled = kIOBatteryInstalled,
+ kUPSInstalled = (1<<3),
+ kBatteryAtWarn = (1<<4),
+ kBatteryDepleted = (1<<5),
+ kACnoChargeCapability = (1<<6), // AC adapter cannot charge battery
+ kRawLowBattery = (1<<7), // used only by Platform Expert
+  kForceLowSpeed          = (1<<8)                // set by Platfm Expert, chk'd by Pwr Plugin
+};
+
+// For use with IOPMPowerSource bFlags
+#define IOPM_POWER_SOURCE_REV 2
+enum {
+ kIOPMACInstalled = kIOBatteryChargerConnect,
+ kIOPMBatteryCharging = kIOBatteryCharge,
+ kIOPMBatteryInstalled = kIOBatteryInstalled,
+ kIOPMUPSInstalled = (1<<3),
+ kIOPMBatteryAtWarn = (1<<4),
+ kIOPMBatteryDepleted = (1<<5),
+ kIOPMACnoChargeCapability = (1<<6), // AC adapter cannot charge battery
+ kIOPMRawLowBattery = (1<<7), // used only by Platform Expert
+ kIOPMForceLowSpeed = (1<<8), // set by Platfm Expert, chk'd by Pwr Plugin
+ kIOPMClosedClamshell = (1<<9), // set by PMU - reflects state of the clamshell
+ kIOPMClamshellStateOnWake = (1<<10) // used only by Platform Expert
+};
+
+
#if KERNEL && __cplusplus
class IOService;
#include <libkern/c++/OSObject.h>
#include <IOKit/IOTypes.h>
#include <IOKit/IOReturn.h>
+#include "IOPM.h"
class ApplePMU;
-// Our flags
-
-enum {
- kBatteryInstalled = (1<<0),
- kBatteryCharging = (1<<1),
- kACInstalled = (1<<2),
- kUPSInstalled = (1<<3),
- kBatteryAtWarn = (1<<4),
- kBatteryDepleted = (1<<5)
-};
-
const unsigned long kSecondsPerHour = (60*60);
const unsigned long kTenMinutesInSeconds = (10 * 60);
--- /dev/null
+/*
+ * Copyright (c) 2002 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+#ifndef _IOKIT_IOPMPRIVATE_H
+#define _IOKIT_IOPMPRIVATE_H
+
+#include <IOKit/pwr_mgt/IOPM.h>
+
+// Private power commands issued to root domain
+// bits 0-7 in IOPM.h
+
+enum {
+ kIOPMSetValue = (1<<16),
+ // don't sleep on clamshell closure on a portable with AC connected
+ kIOPMSetDesktopMode = (1<<17),
+ // set state of AC adaptor connected
+ kIOPMSetACAdaptorConnected = (1<<18),
+};
+
+#endif /* ! _IOKIT_IOPMPRIVATE_H */
+
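These private bits share the 32-bit message word with the kIOPM commands in IOPM.h, with kIOPMSetValue carrying the boolean payload. A small stand-alone sketch (message values illustrative) shows the encode/decode handshake that receivePowerNotification performs above:

#include <stdio.h>

enum {
	kIOPMSetValue              = (1<<16),
	kIOPMSetDesktopMode        = (1<<17),
	kIOPMSetACAdaptorConnected = (1<<18)
};

int main(void)
{
	/* Sender: report "desktop mode is now on" in one message word. */
	unsigned long msg = kIOPMSetDesktopMode | kIOPMSetValue;

	/* Receiver: peel off the flag and its boolean payload, as
	 * receivePowerNotification does, leaving any other bits intact. */
	if (msg & kIOPMSetDesktopMode) {
		int desktopMode = (0 != (msg & kIOPMSetValue));
		msg &= ~(kIOPMSetDesktopMode | kIOPMSetValue);
		printf("desktopMode = %d, remaining bits = 0x%lx\n",
		       desktopMode, msg);
	}
	return 0;
}
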
IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A
export INCDIR = $(IOKIT_FRAMEDIR)/Headers
-export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders
+export LCLDIR = $(FRAMEDIR)/Kernel.framework/Versions/A/PrivateHeaders/IOKit
include $(MakeInc_cmd)
include $(MakeInc_def)
IOPMinformee.h \
IOPMinformeeList.h \
IOPMlog.h \
- IOPMpmChild.h
+ IOPMpmChild.h \
+ IOPMPrivate.h
INSTINC_SUBDIRS =
INSTINC_SUBDIRS_PPC =
ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h))
INSTALL_MI_LIST = IOPMLibDefs.h IOPM.h
-INSTALL_MI_LCL_LIST = ""
+INSTALL_MI_LCL_LIST = IOPMPrivate.h
INSTALL_MI_DIR = $(MI_DIR)
EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS))
class RootDomainUserClient;
+#define kRootDomainSupportedFeatures "Supported Features"
+
enum {
kRootDomainSleepNotSupported = 0x00000000,
kRootDomainSleepSupported = 0x00000001,
IOReturn rootDomainShutdown ( void );
}
+#define IOPM_ROOTDOMAIN_REV 2
class IOPMrootDomain: public IOService
{
virtual IOOptionBits getSleepSupported();
virtual IOReturn requestPowerDomainState ( IOPMPowerFlags, IOPowerConnection *, unsigned long );
virtual void handleSleepTimerExpiration ( void );
+ void stopIgnoringClamshellEventsDuringWakeup ( void );
void wakeFromDoze( void );
+ void publishFeature( const char *feature );
private:
long longestNonSleepSlider; // pref: longest of other idle times
long extraSleepDelay; // sleepSlider - longestNonSleepSlider
thread_call_t extraSleepTimer; // used to wait between, say, display idle and system idle
-
+ thread_call_t clamshellWakeupIgnore; // Used to ignore clamshell close events while we're waking from sleep
+
virtual void powerChangeDone ( unsigned long );
virtual void command_received ( void *, void * , void * , void *);
virtual bool tellChangeDown ( unsigned long stateNum);
void adjustPowerState( void );
void restoreUserSpinDownTimeout ( void );
+
unsigned int user_spindown; // User's selected disk spindown value
unsigned int systemBooting:1;
unsigned int canSleep:1;
unsigned int idleSleepPending:1;
unsigned int sleepASAP:1;
- unsigned int reservedA:1;
- unsigned int reservedB[2];
+ unsigned int desktopMode:1;
+
+ unsigned int acAdaptorConnect:1;
+ unsigned int ignoringClamshellDuringWakeup:1;
+ unsigned int reservedA:6;
+ unsigned char reservedB[3];
+
thread_call_t diskSyncCalloutEntry;
IOOptionBits platformSleepSupport;
};
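A sketch of how a platform driver might use the new publishFeature()
hook; the feature string is hypothetical, and the caller is assumed to
already hold a pointer to the root domain.

// Hypothetical caller: advertise a power feature so that it shows up
// under the root domain's "Supported Features" property.
static void advertiseDimming(IOPMrootDomain *root)
{
    if (root)
        root->publishFeature("DisplayDims");    // hypothetical feature
}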
_nvramImage = IONew(UInt8, kIODTNVRAMImageSize);
if (_nvramImage == 0) return false;
-
+
+ _nvramPartitionOffsets = OSDictionary::withCapacity(1);
+ if (_nvramPartitionOffsets == 0) return false;
+
+ _nvramPartitionLengths = OSDictionary::withCapacity(1);
+ if (_nvramPartitionLengths == 0) return false;
+
_registryPropertiesKey = OSSymbol::withCStringNoCopy("aapl,pci");
if (_registryPropertiesKey == 0) return false;
void IODTNVRAM::registerNVRAMController(IONVRAMController *nvram)
{
- UInt32 currentOffset = 0;
+ char partitionID[18];
+ UInt32 partitionOffset, partitionLength;
+ UInt32 freePartitionOffset, freePartitionSize;
+ UInt32 currentLength, currentOffset = 0;
+ OSNumber *partitionOffsetNumber, *partitionLengthNumber;
if (_nvramController != 0) return;
_nvramController->read(0, _nvramImage, kIODTNVRAMImageSize);
- // Find the offsets for the OF, XPRAM and NameRegistry partitions in NVRAM.
+ // Find the offsets for the OF, XPRAM, and NameRegistry partitions.
_ofPartitionOffset = 0xFFFFFFFF;
_xpramPartitionOffset = 0xFFFFFFFF;
_nrPartitionOffset = 0xFFFFFFFF;
+ freePartitionOffset = 0xFFFFFFFF;
+ freePartitionSize = 0;
if (getPlatform()->getBootROMType()) {
// Look through the partitions to find the OF and MacOS partitions.
while (currentOffset < kIODTNVRAMImageSize) {
- if (strcmp((const char *)_nvramImage + currentOffset + 4, "common") == 0) {
- _ofPartitionOffset = currentOffset + 16;
- _ofPartitionSize =
- (((UInt16 *)(_nvramImage + currentOffset))[1] - 1) * 0x10;
- } else if (strcmp((const char *)_nvramImage + currentOffset + 4, "APL,MacOS75") == 0) {
- _xpramPartitionOffset = currentOffset + 16;
+ currentLength = ((UInt16 *)(_nvramImage + currentOffset))[1] * 16;
+
+ partitionOffset = currentOffset + 16;
+ partitionLength = currentLength - 16;
+
+ if (strncmp((const char *)_nvramImage + currentOffset + 4,
+ kIODTNVRAMOFPartitionName, 12) == 0) {
+ _ofPartitionOffset = partitionOffset;
+ _ofPartitionSize = partitionLength;
+ } else if (strncmp((const char *)_nvramImage + currentOffset + 4,
+ kIODTNVRAMXPRAMPartitionName, 12) == 0) {
+ _xpramPartitionOffset = partitionOffset;
_xpramPartitionSize = kIODTNVRAMXPRAMSize;
_nrPartitionOffset = _xpramPartitionOffset + _xpramPartitionSize;
- _nrPartitionSize =
- (((UInt16 *)(_nvramImage + currentOffset))[1] - 1) * 0x10 -
- _xpramPartitionSize;
+ _nrPartitionSize = partitionLength - _xpramPartitionSize;
+ } else if (strncmp((const char *)_nvramImage + currentOffset + 4,
+ kIODTNVRAMFreePartitionName, 12) == 0) {
+ freePartitionOffset = currentOffset;
+ freePartitionSize = currentLength;
+ } else {
+ // Construct the partition ID from the signature and name.
+ sprintf(partitionID, "0x%02x,",
+ *(UInt8 *)(_nvramImage + currentOffset));
+ strncpy(partitionID + 5,
+ (const char *)(_nvramImage + currentOffset + 4), 12);
+ partitionID[17] = '\0';
+
+ partitionOffsetNumber = OSNumber::withNumber(partitionOffset, 32);
+ partitionLengthNumber = OSNumber::withNumber(partitionLength, 32);
+
+ // Save the partition offset and length
+ _nvramPartitionOffsets->setObject(partitionID, partitionOffsetNumber);
+ _nvramPartitionLengths->setObject(partitionID, partitionLengthNumber);
+
+ partitionOffsetNumber->release();
+ partitionLengthNumber->release();
}
- currentOffset += ((short *)(_nvramImage + currentOffset))[1] * 16;
+ currentOffset += currentLength;
}
} else {
// Use the fixed address for old world machines.
return err;
}
+OSDictionary *IODTNVRAM::getNVRAMPartitions(void)
+{
+ return _nvramPartitionLengths;
+}
+
+IOReturn IODTNVRAM::readNVRAMPartition(const OSSymbol *partitionID,
+ IOByteCount offset, UInt8 *buffer,
+ IOByteCount length)
+{
+ OSNumber *partitionOffsetNumber, *partitionLengthNumber;
+ UInt32 partitionOffset, partitionLength;
+
+ partitionOffsetNumber =
+ (OSNumber *)_nvramPartitionOffsets->getObject(partitionID);
+ partitionLengthNumber =
+ (OSNumber *)_nvramPartitionLengths->getObject(partitionID);
+
+ if ((partitionOffsetNumber == 0) || (partitionLengthNumber == 0))
+ return kIOReturnNotFound;
+
+ partitionOffset = partitionOffsetNumber->unsigned32BitValue();
+ partitionLength = partitionLengthNumber->unsigned32BitValue();
+
+ if ((buffer == 0) || (length <= 0) || (offset < 0) ||
+ (offset + length > partitionLength))
+ return kIOReturnBadArgument;
+
+ bcopy(_nvramImage + partitionOffset + offset, buffer, length);
+
+ return kIOReturnSuccess;
+}
+IOReturn IODTNVRAM::writeNVRAMPartition(const OSSymbol *partitionID,
+ IOByteCount offset, UInt8 *buffer,
+ IOByteCount length)
+{
+ OSNumber *partitionOffsetNumber, *partitionLengthNumber;
+ UInt32 partitionOffset, partitionLength;
+
+ partitionOffsetNumber =
+ (OSNumber *)_nvramPartitionOffsets->getObject(partitionID);
+ partitionLengthNumber =
+ (OSNumber *)_nvramPartitionLengths->getObject(partitionID);
+
+ if ((partitionOffsetNumber == 0) || (partitionLengthNumber == 0))
+ return kIOReturnNotFound;
+
+ partitionOffset = partitionOffsetNumber->unsigned32BitValue();
+ partitionLength = partitionLengthNumber->unsigned32BitValue();
+
+ if ((buffer == 0) || (length <= 0) || (offset < 0) ||
+ (offset + length > partitionLength))
+ return kIOReturnBadArgument;
+
+ bcopy(buffer, _nvramImage + partitionOffset + offset, length);
+
+ _nvramImageDirty = true;
+
+ return kIOReturnSuccess;
+}
// Private methods for Open Firmware variable access.
else return kIOReturnNotReady;
}
+OSDictionary *IODTPlatformExpert::getNVRAMPartitions(void)
+{
+ if (dtNVRAM) return dtNVRAM->getNVRAMPartitions();
+ else return 0;
+}
+
+IOReturn IODTPlatformExpert::readNVRAMPartition(const OSSymbol * partitionID,
+ IOByteCount offset, UInt8 * buffer,
+ IOByteCount length)
+{
+ if (dtNVRAM) return dtNVRAM->readNVRAMPartition(partitionID, offset,
+ buffer, length);
+ else return kIOReturnNotReady;
+}
+
+IOReturn IODTPlatformExpert::writeNVRAMPartition(const OSSymbol * partitionID,
+ IOByteCount offset, UInt8 * buffer,
+ IOByteCount length)
+{
+ if (dtNVRAM) return dtNVRAM->writeNVRAMPartition(partitionID, offset,
+ buffer, length);
+ else return kIOReturnNotReady;
+}
+
+
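A usage sketch for the new pass-throughs. The partition ID follows the
"0xSS,name" convention built in registerNVRAMController() above, but the
specific ID, buffer size, and helper are illustrative only.

// Hypothetical client of the new NVRAM partition accessors.
static void dumpPartitionHead(IODTPlatformExpert *pe)
{
    const OSSymbol *id = OSSymbol::withCString("0x70,diag-data"); // hypothetical ID
    UInt8 buf[16];

    if (pe && id &&
        (pe->readNVRAMPartition(id, 0, buf, sizeof(buf)) == kIOReturnSuccess)) {
        // The first 16 bytes of the partition are now in buf.
    }
    if (id) id->release();
}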
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#undef super
*/
const char * gIOKernelKmods =
"{
- 'com.apple.kernel' = '5.4';
- 'com.apple.kernel.bsd' = '5.4';
- 'com.apple.kernel.iokit' = '5.4';
- 'com.apple.kernel.libkern' = '5.4';
- 'com.apple.kernel.mach' = '5.4';
+ 'com.apple.kernel' = '5.5';
+ 'com.apple.kernel.bsd' = '5.5';
+ 'com.apple.kernel.iokit' = '5.5';
+ 'com.apple.kernel.libkern' = '5.5';
+ 'com.apple.kernel.mach' = '5.5';
'com.apple.iokit.IOADBFamily' = '1.1';
+ 'com.apple.iokit.IONVRAMFamily' = '1.1';
'com.apple.iokit.IOSystemManagementFamily' = '1.1';
+ 'com.apple.iokit.ApplePlatformFamily' = '1.0';
+ 'com.apple.driver.AppleNMI' = '1.0';
}";
iokit/Drivers/platform/drvAppleGrandCentral/GrandCentral.cpp optional disabled-iokitcpp
iokit/Drivers/platform/drvAppleOHare/OHare.cpp optional iokitcpp
-
# ATA driver
#iokit/Drivers/ata/drvAppleUltra66ATA/AppleUltra66ATA.cpp optional iokitcpp
#iokit/Drivers/ata/drvAppleUltra33ATA/AppleUltra33ATA.cpp optional iokitcpp
vm_page_size,
UPL_COMMIT_NOTIFY_EMPTY,
pl,
- MAX_UPL_TRANSFER,
+ page_list_count,
&empty);
if (empty)
upl_deallocate(upl);
panic("ml_cause_interrupt not defined yet on Intel");
}
+void ml_thread_policy(
+ thread_t thread,
+ unsigned policy_id,
+ unsigned policy_info)
+{
+ return;
+}
+
/* Initialize Interrupts */
void ml_install_interrupt_handler(
void *nub,
/* Generate a fake interrupt */
void ml_cause_interrupt(void);
+void ml_thread_policy(
+ thread_t thread,
+ unsigned policy_id,
+ unsigned policy_info);
+
+#define MACHINE_GROUP 0x00000001
+#define MACHINE_NETWORK_GROUP 0x10000000
+#define MACHINE_NETWORK_WORKLOOP 0x00000001
+#define MACHINE_NETWORK_NETISR 0x00000002
+
/* Initialize Interrupts */
void ml_install_interrupt_handler(
void *nub,
* Universal Page List data structures
*/
-#define MAX_UPL_TRANSFER 64
+#define MAX_UPL_TRANSFER 256
struct upl_page_info {
vm_offset_t phys_addr;
static int scrreg_top, scrreg_bottom;
/* Misc */
-void vc_initialize(void);
-void vc_flush_forward_buffer(void);
-void vc_store_char(unsigned char);
+static void vc_initialize(void);
+static void vc_flush_forward_buffer(void);
+static void vc_store_char(unsigned char);
+static void vc_putchar(char ch);
void vcattach(void);
/*
* For the color support (Michel Pollet)
*/
-unsigned char vc_color_index_table[33] =
+static unsigned char vc_color_index_table[33] =
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 };
-unsigned long vc_color_depth_masks[4] =
+static unsigned long vc_color_depth_masks[4] =
{ 0x000000FF, 0x00007FFF, 0x00FFFFFF };
-unsigned long vc_colors[8][3] = {
+static unsigned long vc_colors[8][3] = {
{ 0xFFFFFFFF, 0x00000000, 0x00000000 }, /* black */
{ 0x23232323, 0x7C007C00, 0x00FF0000 }, /* red */
{ 0xb9b9b9b9, 0x03e003e0, 0x0000FF00 }, /* green */
{ 0x00000000, 0x7FFF7FFF, 0x00FFFFFF } /* white */
};
-unsigned long vc_color_mask = 0;
-unsigned long vc_color_fore = 0;
-unsigned long vc_color_back = 0;
-int vc_normal_background = 1;
+static unsigned long vc_color_mask = 0;
+static unsigned long vc_color_fore = 0;
+static unsigned long vc_color_back = 0;
+static int vc_normal_background = 1;
/*
static void (*vc_paintchar) (unsigned char c, int x, int y, int attrs);
#ifdef RENDERALLOCATE
-unsigned char *renderedFont = NULL; /* rendered font buffer */
+static unsigned char *renderedFont = NULL; /* rendered font buffer */
#else
#define REN_MAX_DEPTH 32
/* that's the size for a 32 bits buffer... */
#define REN_MAX_SIZE (128L*1024)
-unsigned char renderedFont[REN_MAX_SIZE];
+static unsigned char renderedFont[REN_MAX_SIZE];
#endif
/* Rendered Font Size */
-unsigned long vc_rendered_font_size = REN_MAX_SIZE;
-long vc_rendered_error = 0;
+static unsigned long vc_rendered_font_size = REN_MAX_SIZE;
+static long vc_rendered_error = 0;
/* If the one bit table was reversed */
-short vc_one_bit_reversed = 0;
+static short vc_one_bit_reversed = 0;
/* Size of a character in the table (bytes) */
-int vc_rendered_char_size = 0;
+static int vc_rendered_char_size = 0;
/*
# Attribute codes:
}
-void
+static void
vc_putchar(char ch)
{
if (!ch) {
/*
* Actually draws the buffer, handle the jump scroll
*/
-void vc_flush_forward_buffer(void)
+static void vc_flush_forward_buffer(void)
{
int start = 0;
int todo = 0;
int
vcputc(int l, int u, int c)
{
+ if(!vinfo.v_baseaddr)
+ return 0;
+
/*
* Either we're really buffering stuff or we're not yet because
* the probe hasn't been done.
* Store characters to be drawn 'later', handle overflows
*/
-void
+static void
vc_store_char(unsigned char c)
{
int flush = 0;
}
}
-void
+static void
vc_initialize(void)
{
#if 0
static boolean_t vc_acquired;
static boolean_t vc_need_clear;
-void vc_blit_rect_8c( int x, int y,
+static void vc_blit_rect_8c( int x, int y,
int width, int height,
int transparent, unsigned char * dataPtr )
{
}
-void vc_blit_rect_8m( int x, int y,
+static void vc_blit_rect_8m( int x, int y,
int width, int height,
int transparent, unsigned char * dataPtr )
{
-void vc_blit_rect_16( int x, int y,
+static void vc_blit_rect_16( int x, int y,
int width, int height,
int transparent, unsigned char * dataPtr )
{
}
}
-void vc_blit_rect_32( unsigned int x, unsigned int y,
+static void vc_blit_rect_32( unsigned int x, unsigned int y,
unsigned int width, unsigned int height,
int transparent, unsigned char * dataPtr )
{
}
}
-void vc_blit_rect( int x, int y,
+static void vc_blit_rect( int x, int y,
int width, int height,
int transparent, unsigned char * dataPtr )
{
+ if(!vinfo.v_baseaddr)
+ return;
+
switch( vinfo.v_depth) {
case 8:
vc_blit_rect_8c( x, y, width, height, transparent, dataPtr);
}
}
-void vc_progress_task( void * arg )
+static void vc_progress_task( void * arg )
{
spl_t s;
int count = (int) arg;
}
}
-boolean_t
+static boolean_t
vc_progress_set( boolean_t enable )
{
spl_t s;
extern int disableConsoleOutput;
-void vc_clear_screen( void )
+static void vc_clear_screen( void )
{
reversecursor();
vt100_reset();
*/
#include <device/device_types.h>
-extern void vc_putchar(
- char ch);
extern int vcputc(
int l,
int u,
#define risegm 0x00080000
#define eiec 13
#define eiecm 0x00040000
+#define mum 14
+#define mumm 0x00020000
#define nhr 15
#define nhrm 0x00010000
#define ice 16
#define nopti 31
#define noptim 0x00000001
+; hid1 bits
+#define hid1pcem 0xF8000000
+#define hid1prem 0x06000000
+#define hid1pi0 14
+#define hid1pi0m 0x00020000
+#define hid1ps 15
+#define hid1psm 0x00010000
+#define hid1pc0 0x0000F800
+#define hid1pr0 0x00000600
+#define hid1pc1 0x000000F8
+#define hid1pr1 0x00000006
+
; msscr0 bits
#define shden 0
#define shdenm 0x80000000
#define pfThermalb 7
#define pfThermInt 0x00800000
#define pfThermIntb 8
+#define pfSlowNap 0x00004000
+#define pfSlowNapb 17
+#define pfNoMuMMCK 0x00002000
+#define pfNoMuMMCKb 18
#define pfLClck 0x00001000
#define pfLClckb 19
#define pfWillNap 0x00000800
unsigned int pfICTRL;
unsigned int pfLDSTCR;
unsigned int pfLDSTDB;
- unsigned int reserved[7];
+ unsigned int l2crOriginal;
+ unsigned int l3crOriginal;
+ unsigned int pfBootConfig;
+ unsigned int reserved[4];
};
typedef struct procFeatures procFeatures;
#include <mach/machine.h>
#include <ppc/vmachmon.h>
#include <ppc/PPCcalls.h>
+#include <ppc/mem.h>
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE)0)->MEMBER)
DECLARE("pfWillNapb", pfWillNapb);
DECLARE("pfNoMSRir", pfNoMSRir);
DECLARE("pfNoMSRirb", pfNoMSRirb);
+ DECLARE("pfSlowNap", pfSlowNap);
+ DECLARE("pfSlowNapb", pfSlowNapb);
+ DECLARE("pfNoMuMMCK", pfNoMuMMCK);
+ DECLARE("pfNoMuMMCKb", pfNoMuMMCKb);
DECLARE("pfLClck", pfLClck);
DECLARE("pfLClckb", pfLClckb);
DECLARE("pfL3pdet", pfL3pdet);
DECLARE("pfICTRL", offsetof(struct per_proc_info *, pf.pfICTRL));
DECLARE("pfLDSTCR", offsetof(struct per_proc_info *, pf.pfLDSTCR));
DECLARE("pfLDSTDB", offsetof(struct per_proc_info *, pf.pfLDSTDB));
+ DECLARE("pfl2crOriginal", offsetof(struct per_proc_info *, pf.l2crOriginal));
+ DECLARE("pfl3crOriginal", offsetof(struct per_proc_info *, pf.l3crOriginal));
+ DECLARE("pfBootConfig", offsetof(struct per_proc_info *, pf.pfBootConfig));
DECLARE("pfSize", sizeof(procFeatures));
DECLARE("thrmmaxTemp", offsetof(struct per_proc_info *, thrm.maxTemp));
DECLARE("CPU_SUBTYPE_POWERPC_7400", CPU_SUBTYPE_POWERPC_7400);
DECLARE("CPU_SUBTYPE_POWERPC_7450", CPU_SUBTYPE_POWERPC_7450);
+ DECLARE("shdIBAT", offsetof(struct shadowBAT *, IBATs));
+ DECLARE("shdDBAT", offsetof(struct shadowBAT *, DBATs));
return(0); /* For ANSI C :-) */
stw r6,napTotal+4(r2) ; Save the low total
stw r8,napTotal(r2) ; Save the high total
stw r3,savesrr0(r13) ; Modify to return to nap/doze exit
-
+
+
notNapping: stw r12,saver12(r13) /* Save this one */
bf- featL1ena,skipz3 ; L1 cache is disabled...
; fix up to return directly to caller of probe.
;
- lwz r30,saver5(r13) ; Get proper DBAT values
- lwz r28,saver6(r13)
- lwz r27,saver7(r13)
- lwz r11,saver8(r13)
- lwz r18,saver9(r13)
+ lis r11,hi16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address
+ ori r11,r11,lo16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address
+
+ lwz r30,0(r11) ; Pick up DBAT 0 high
+ lwz r28,4(r11) ; Pick up DBAT 0 low
+ lwz r27,8(r11) ; Pick up DBAT 1 high
+ lwz r18,16(r11) ; Pick up DBAT 2 high
+ lwz r11,24(r11) ; Pick up DBAT 3 high
sync
mtdbatu 0,r30 ; Restore DBAT 0 high
mtdbatl 0,r28 ; Restore DBAT 0 low
mtdbatu 1,r27 ; Restore DBAT 1 high
- mtdbatu 2,r11 ; Restore DBAT 2 high
- mtdbatu 3,r18 ; Restore DBAT 3 high
+ mtdbatu 2,r18 ; Restore DBAT 2 high
+ mtdbatu 3,r11 ; Restore DBAT 3 high
sync
+ lwz r27,saver6(r13) ; Get the saved R6 value
+ mtspr hid0,r27 ; Restore HID0
+ isync
+
lwz r28,savelr(r13) ; Get return point
lwz r27,saver0(r13) ; Get the saved MSR
li r30,0 ; Get a failure RC
CreateFakeIO();
}
+void ml_thread_policy(
+ thread_t thread,
+ unsigned policy_id,
+ unsigned policy_info)
+{
+ if ((policy_id == MACHINE_GROUP) &&
+ ((per_proc_info[0].pf.Available) & pfSMPcap))
+ thread_bind(thread, master_processor);
+}
+
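A sketch of the intended call pattern. Only MACHINE_GROUP has an effect
in this PPC implementation (binding the thread to the master processor
on SMP-capable parts); the thread handle shown is assumed to be the
caller's own.

// Hypothetical caller: group a network workloop thread.
ml_thread_policy(current_thread(), MACHINE_GROUP,
                 MACHINE_NETWORK_GROUP | MACHINE_NETWORK_WORKLOOP);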
void machine_idle(void)
{
if (per_proc_info[cpu_number()].interrupts_enabled == TRUE) {
}
}
+#define l2em 0x80000000
+#define l3em 0x80000000
+
+extern int real_ncpus;
+
+int
+ml_enable_cache_level(int cache_level, int enable)
+{
+ int old_mode;
+ unsigned long available, ccr;
+
+ if (real_ncpus != 1) return -1;
+
+ available = per_proc_info[0].pf.Available;
+
+ if ((cache_level == 2) && (available & pfL2)) {
+ ccr = per_proc_info[0].pf.l2cr;
+ old_mode = (ccr & l2em) ? TRUE : FALSE;
+ if (old_mode != enable) {
+ if (enable) ccr = per_proc_info[0].pf.l2crOriginal;
+ else ccr = 0;
+ per_proc_info[0].pf.l2cr = ccr;
+ cacheInit();
+ }
+
+ return old_mode;
+ }
+
+ if ((cache_level == 3) && (available & pfL3)) {
+ ccr = per_proc_info[0].pf.l3cr;
+ old_mode = (ccr & l3em) ? TRUE : FALSE;
+ if (old_mode != enable) {
+ if (enable) ccr = per_proc_info[0].pf.l3crOriginal;
+ else ccr = 0;
+ per_proc_info[0].pf.l3cr = ccr;
+ cacheInit();
+ }
+
+ return old_mode;
+ }
+
+ return -1;
+}
+
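A usage sketch: disable the L3, do some work, then restore the previous
state. ml_enable_cache_level() returns the prior enable state, or -1
when the cache level is absent or more than one CPU is running.

// Hypothetical single-CPU caller.
int prev = ml_enable_cache_level(3, FALSE);
if (prev >= 0) {
    /* ... timing-sensitive work with the L3 disabled ... */
    ml_enable_cache_level(3, prev);     // restore the earlier state
}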
void
init_ast_check(processor_t processor)
{}
/* Generate a fake interrupt */
void ml_cause_interrupt(void);
+void ml_thread_policy(
+ thread_t thread,
+ unsigned policy_id,
+ unsigned policy_info);
+
+#define MACHINE_GROUP 0x00000001
+#define MACHINE_NETWORK_GROUP 0x10000000
+#define MACHINE_NETWORK_WORKLOOP 0x00000001
+#define MACHINE_NETWORK_NETISR 0x00000002
+
#ifdef MACH_KERNEL_PRIVATE
/* check pending timers */
void machine_clock_assist(void);
void ml_get_timebase(unsigned long long *timstamp);
void ml_sense__nmi(void);
+int ml_enable_cache_level(int cache_level, int enable);
+void ml_set_processor_speed(unsigned long speed);
+
#endif /* _PPC_MACHINE_ROUTINES_H_ */
mr r0,r5
li r3,0
mprNoMSRx:
+
+ mfspr r6, hid0 ; Get a copy of hid0
+
;
; We need to ensure that there is no more than 1 BAT register that
; can get a hit. There could be repercussions beyond the ken
; of mortal man. It is best not to tempt fate.
;
+
+; Note: we will reload these from the shadow BATs later
+
li r10,0 ; Clear a register
- mfdbatu r5,0 ; Save DBAT 0 high
- mfdbatl r6,0 ; Save DBAT 0 low
- mfdbatu r7,1 ; Save DBAT 1 high
- mfdbatu r8,2 ; Save DBAT 2 high
- mfdbatu r9,3 ; Save DBAT 3 high
sync ; Make sure all is well
mtdbatu 0,r10 ; Now the upper
sync ; Just make sure
+ dcbf 0,r12 ; Make sure we kill the cache to avoid paradoxes
+ sync
+
ori r11,r2,lo16(MASK(MSR_DR)) ; Turn on data translation
mtmsr r11 ; Do it for real
isync ; Make sure of it
sync ; Get caught up yet again
isync ; Do not go further till we are here
+ mtmsr r2 ; Turn translation back off
+ isync
+
+ mtspr hid0, r6 ; Restore HID0
+ isync
+
+ lis r10,hi16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address
+ ori r10,r10,lo16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address
+
+ lwz r5,0(r10) ; Pick up DBAT 0 high
+ lwz r6,4(r10) ; Pick up DBAT 0 low
+ lwz r7,8(r10) ; Pick up DBAT 1 high
+ lwz r8,16(r10) ; Pick up DBAT 2 high
+ lwz r9,24(r10) ; Pick up DBAT 3 high
+
mtdbatu 0,r5 ; Restore DBAT 0 high
mtdbatl 0,r6 ; Restore DBAT 0 low
mtdbatu 1,r7 ; Restore DBAT 1 high
bne- yesnap ; Yeah, need to get it again...
stw r8,napStamp(r12) ; Set high order time stamp
stw r7,napStamp+4(r12) ; Set low order nap stamp
-
+
+
;
; We have to open up interruptions here because book 4 says that we should
; turn on only the POW bit and that we should have interrupts enabled
mfspr r8,l2cr ; Get the L2CR
lwz r3,pfl2cr(r12) ; Get the L2CR value
+ rlwinm. r0,r8,0,l2e,l2e ; Was the L2 enabled?
+ bne ciflushl2 ; Yes, force flush
+ cmplwi r8, 0 ; Was the L2 all the way off?
+ beq ciinvdl2 ; Yes, force invalidate
lis r0,hi16(l2sizm|l2clkm|l2ramm|l2ohm) ; Get configuration bits
xor r2,r8,r3 ; Get changing bits?
ori r0,r0,lo16(l2slm|l2dfm|l2bypm) ; More config bits
and. r0,r0,r2 ; Did any change?
bne- ciinvdl2 ; Yes, just invalidate and get PLL synced...
+ciflushl2:
bf pfL2fab,ciswfl2 ; Flush not in hardware...
- mr r10,r3 ; Take a copy now
+ mr r10,r8 ; Take a copy now
bf 31,cinol2lck ; Skip if pfLClck not set...
ciswfl2:
lwz r0,pfl2Size(r12) ; Get the L2 size
- oris r2,r3,hi16(l2dom) ; Set L2 to data only mode
+ oris r2,r8,hi16(l2dom) ; Set L2 to data only mode
b ciswfl2doa ; Branch to next line...
addi r10,r10,32 ; Next line
bdnz ciswfldl2a ; Do the lot...
-ciinvdl2: rlwinm r3,r3,0,l2e+1,31 ; Clear the enable bit
+ciinvdl2: rlwinm r8,r3,0,l2e+1,31 ; Use the saved L2CR and clear the enable bit
b cinla ; Branch to next line...
.align 5
-cinlc: mtspr l2cr,r3 ; Disable L2
+cinlc: mtspr l2cr,r8 ; Disable L2
sync
isync
b ciinvl2 ; It is off, go invalidate it...
ciinvl2: sync
isync
- oris r2,r3,hi16(l2im) ; Get the invalidate flag set
+
+ cmplwi r3, 0 ; Should the L2 be all the way off?
+ beq cinol2 ; Yes, done with L2
+
+ oris r2,r8,hi16(l2im) ; Get the invalidate flag set
mtspr l2cr,r2 ; Start the invalidate
sync
rlwinm. r2,r2,0,l2ip,l2ip ; Is the invalidate still going?
bne+ ciinvdl2a ; Assume so, this will take a looong time...
sync
- mtspr l2cr,r3 ; Turn off the invalidate request
+ mtspr l2cr,r8 ; Turn off the invalidate request
cinol2:
mfspr r8,l3cr ; Get the L3CR
lwz r3,pfl3cr(r12) ; Get the L3CR value
+ rlwinm. r0,r8,0,l3e,l3e ; Was the L3 enabled?
+ bne ciflushl3 ; Yes, force flush
+ cmplwi r8, 0 ; Was the L3 all the way off?
+ beq ciinvdl3 ; Yes, force invalidate
lis r0,hi16(l3pem|l3sizm|l3dxm|l3clkm|l3spom|l3ckspm) ; Get configuration bits
xor r2,r8,r3 ; Get changing bits?
ori r0,r0,lo16(l3pspm|l3repm|l3rtm|l3cyam|l3dmemm|l3dmsizm) ; More config bits
and. r0,r0,r2 ; Did any change?
bne- ciinvdl3 ; Yes, just invalidate and get PLL synced...
+ciflushl3:
sync ; 7450 book says do this even though not needed
- mr r10,r3 ; Take a copy now
+ mr r10,r8 ; Take a copy now
bf 31,cinol3lck ; Skip if pfL23lck not set...
rlwinm. r10,r10,0,l3hwf,l3hwf ; Is the flush over?
bne+ cihwfl3 ; Nope, keep going...
-ciinvdl3: rlwinm r3,r3,0,l3e+1,31 ; Clear the enable bit
+ciinvdl3: rlwinm r8,r3,0,l3e+1,31 ; Use saved L3CR value and clear the enable bit
sync ; Make sure of life, liberty, and justice
- mtspr l3cr,r3 ; Disable L3
+ mtspr l3cr,r8 ; Disable L3
sync
- ori r3,r3,lo16(l3im) ; Get the invalidate flag set
+ cmplwi r3, 0 ; Should the L3 be all the way off?
+ beq cinol3 ; Yes, done with L3
- mtspr l3cr,r3 ; Start the invalidate
+ ori r8,r8,lo16(l3im) ; Get the invalidate flag set
-ciinvdl3b: mfspr r3,l3cr ; Get the L3CR
- rlwinm. r3,r3,0,l3i,l3i ; Is the invalidate still going?
+ mtspr l3cr,r8 ; Start the invalidate
+
+ciinvdl3b: mfspr r8,l3cr ; Get the L3CR
+ rlwinm. r8,r8,0,l3i,l3i ; Is the invalidate still going?
bne+ ciinvdl3b ; Assume so...
sync
- bf pfL3pdetb, ciinvdl3nopdet
- mfspr r3,l3pdet ; ?
- rlwimi r3,r3,28,0,23 ; ?
- oris r3,r3,0xF000 ; ?
- ori r3,r3,0x0080 ; ?
- mtspr l3pdet,r3 ; ?
+ lwz r10, pfBootConfig(r12) ; Get the saved boot configuration word
+ rlwinm. r10, r10, 24, 28, 31 ; Extract the 4-bit L3PDET shift count
+ beq ciinvdl3nopdet ; Count is zero, skip the L3PDET update
+
+ mfspr r8,l3pdet ; Get the current L3PDET value
+ srw r2, r8, r10 ; Shift it right by the configured count
+ rlwimi r2, r8, 0, 24, 31 ; Keep the original low byte
+ subfic r10, r10, 32 ; Get 32 minus the shift count
+ li r8, -1 ; Start with an all-ones mask
+ ori r2, r2, 0x0080 ; Set the required bit in the low byte
+ slw r8, r8, r10 ; Build a mask of the bits vacated by the shift
+ or r8, r2, r8 ; Merge the mask with the shifted value
+ mtspr l3pdet, r8 ; Write the updated L3PDET
isync
ciinvdl3nopdet:
- mfspr r3,l3cr ; Get the L3CR
- rlwinm r3,r3,0,l3clken+1,l3clken-1 ; Clear the clock enable bit
- mtspr l3cr,r3 ; Disable the clock
+ mfspr r8,l3cr ; Get the L3CR
+ rlwinm r8,r8,0,l3clken+1,l3clken-1 ; Clear the clock enable bit
+ mtspr l3cr,r8 ; Disable the clock
li r2,128 ; ?
ciinvdl3c: addi r2,r2,-1 ; ?
mtspr msssr0,r10 ; ?
sync
- oris r3,r3,hi16(l3em|l3clkenm) ; Turn on enable bit
- mtspr l3cr,r3 ; Enable it
+ mtspr l3cr,r3 ; Enable it as desired
sync
cinol3:
bf pfL2b,cinol2a ; No level 2 cache to enable
lwz r3,pfl2cr(r12) ; Get the L2CR value
- oris r3,r3,hi16(l2em) ; Turn on enable bit
- mtspr l2cr,r3 ; Enable it
+ cmplwi r3, 0 ; Should the L2 be all the way off?
+ beq cinol2a ; Yes, done with L2
+ mtspr l2cr,r3 ; Enable it as desired
sync
;
blr ; Leave...
+/*
+** ml_set_processor_speed()
+**
+*/
+; Force a line boundary here
+ .align 5
+ .globl EXT(ml_set_processor_speed)
+
+LEXT(ml_set_processor_speed)
+ blr
}
printf("\nNo debugger configured - dumping debug information\n");
- mfdbatu(store[0],0);
- mfdbatl(store[1],0);
- mfdbatu(store[2],1);
- mfdbatl(store[3],1);
- mfdbatu(store[4],2);
- mfdbatl(store[5],2);
- mfdbatu(store[6],3);
- mfdbatl(store[7],3);
- printf("DBAT0: %08X %08X\n", store[0], store[1]);
- printf("DBAT1: %08X %08X\n", store[2], store[3]);
- printf("DBAT2: %08X %08X\n", store[4], store[5]);
- printf("DBAT3: %08X %08X\n", store[6], store[7]);
printf("MSR=%08X\n",mfmsr());
print_backtrace(NULL);
splx(spl);
#include <ppc/savearea.h>
#include <ppc/low_trace.h>
#include <ppc/Diagnostics.h>
+#include <ppc/mem.h>
#include <pexpert/pexpert.h>
/* DBAT[2] maps the I/O Segment 1:1 */
/* DBAT[3] maps the Video Segment 1:1 */
+
+ /* Initialize shadow IBATs */
+ shadow_BAT.IBATs[0].upper=BAT_INVALID;
+ shadow_BAT.IBATs[0].lower=BAT_INVALID;
+ shadow_BAT.IBATs[1].upper=BAT_INVALID;
+ shadow_BAT.IBATs[1].lower=BAT_INVALID;
+ shadow_BAT.IBATs[2].upper=BAT_INVALID;
+ shadow_BAT.IBATs[2].lower=BAT_INVALID;
+ shadow_BAT.IBATs[3].upper=BAT_INVALID;
+ shadow_BAT.IBATs[3].lower=BAT_INVALID;
+
+ /* Initialize shadow DBATs */
+ shadow_BAT.DBATs[0].upper=BAT_INVALID;
+ shadow_BAT.DBATs[0].lower=BAT_INVALID;
+ shadow_BAT.DBATs[1].upper=BAT_INVALID;
+ shadow_BAT.DBATs[1].lower=BAT_INVALID;
+ shadow_BAT.DBATs[2].upper=BAT_INVALID;
+ shadow_BAT.DBATs[2].lower=BAT_INVALID;
+ shadow_BAT.DBATs[3].upper=BAT_INVALID;
+ shadow_BAT.DBATs[3].lower=BAT_INVALID;
+
+
/* If v_baseAddr is non zero, use DBAT3 to map the video segment */
videoAddr = args->Video.v_baseAddr & 0xF0000000;
if (videoAddr) {
- /* start off specifying 1-1 mapping of video seg */
- bat.upper.word = videoAddr;
- bat.lower.word = videoAddr;
-
- bat.upper.bits.bl = 0x7ff; /* size = 256M */
- bat.upper.bits.vs = 1;
- bat.upper.bits.vp = 0;
-
- bat.lower.bits.wimg = PTE_WIMG_IO;
- bat.lower.bits.pp = 2; /* read/write access */
-
- sync();isync();
- mtdbatu(3, BAT_INVALID); /* invalidate old mapping */
- mtdbatl(3, bat.lower.word);
- mtdbatu(3, bat.upper.word);
- sync();isync();
+ /* start off specifying 1-1 mapping of video seg */
+ bat.upper.word = videoAddr;
+ bat.lower.word = videoAddr;
+
+ bat.upper.bits.bl = 0x7ff; /* size = 256M */
+ bat.upper.bits.vs = 1;
+ bat.upper.bits.vp = 0;
+
+ bat.lower.bits.wimg = PTE_WIMG_IO;
+ bat.lower.bits.pp = 2; /* read/write access */
+
+ shadow_BAT.DBATs[3].upper = bat.upper.word;
+ shadow_BAT.DBATs[3].lower = bat.lower.word;
+
+ sync();isync();
+
+ mtdbatu(3, BAT_INVALID); /* invalidate old mapping */
+ mtdbatl(3, bat.lower.word);
+ mtdbatu(3, bat.upper.word);
+ sync();isync();
}
/* Use DBAT2 to map the io segment */
addr = get_io_base_addr() & 0xF0000000;
if (addr != videoAddr) {
- /* start off specifying 1-1 mapping of io seg */
- bat.upper.word = addr;
- bat.lower.word = addr;
-
- bat.upper.bits.bl = 0x7ff; /* size = 256M */
- bat.upper.bits.vs = 1;
- bat.upper.bits.vp = 0;
-
- bat.lower.bits.wimg = PTE_WIMG_IO;
- bat.lower.bits.pp = 2; /* read/write access */
-
- sync();isync();
- mtdbatu(2, BAT_INVALID); /* invalidate old mapping */
- mtdbatl(2, bat.lower.word);
- mtdbatu(2, bat.upper.word);
- sync();isync();
+ /* start off specifying 1-1 mapping of io seg */
+ bat.upper.word = addr;
+ bat.lower.word = addr;
+
+ bat.upper.bits.bl = 0x7ff; /* size = 256M */
+ bat.upper.bits.vs = 1;
+ bat.upper.bits.vp = 0;
+
+ bat.lower.bits.wimg = PTE_WIMG_IO;
+ bat.lower.bits.pp = 2; /* read/write access */
+
+ shadow_BAT.DBATs[2].upper = bat.upper.word;
+ shadow_BAT.DBATs[2].lower = bat.lower.word;
+
+ sync();isync();
+ mtdbatu(2, BAT_INVALID); /* invalidate old mapping */
+ mtdbatl(2, bat.lower.word);
+ mtdbatu(2, bat.upper.word);
+ sync();isync();
}
if (!PE_parse_boot_arg("diag", &dgWork.dgFlags)) dgWork.dgFlags=0; /* Set diagnostic flags */
}
#endif
- /* Initialize shadow IBATs */
- shadow_BAT.IBATs[0].upper=BAT_INVALID;
- shadow_BAT.IBATs[0].lower=BAT_INVALID;
- shadow_BAT.IBATs[1].upper=BAT_INVALID;
- shadow_BAT.IBATs[1].lower=BAT_INVALID;
- shadow_BAT.IBATs[2].upper=BAT_INVALID;
- shadow_BAT.IBATs[2].lower=BAT_INVALID;
- shadow_BAT.IBATs[3].upper=BAT_INVALID;
- shadow_BAT.IBATs[3].lower=BAT_INVALID;
-
- LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]); /* Load up real IBATs from shadows */
+/*
+ * Note: the shadow BATs were already initialized in ppc_init.c;
+ * here we only copy them into the real BAT registers.
+ */
- /* Initialize shadow DBATs */
- shadow_BAT.DBATs[0].upper=BAT_INVALID;
- shadow_BAT.DBATs[0].lower=BAT_INVALID;
- shadow_BAT.DBATs[1].upper=BAT_INVALID;
- shadow_BAT.DBATs[1].lower=BAT_INVALID;
- mfdbatu(shadow_BAT.DBATs[2].upper,2);
- mfdbatl(shadow_BAT.DBATs[2].lower,2);
- mfdbatu(shadow_BAT.DBATs[3].upper,3);
- mfdbatl(shadow_BAT.DBATs[3].lower,3);
+ LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]); /* Load up real IBATs from shadows */
LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]); /* Load up real DBATs from shadows */
- sync();isync();
#if DEBUG
for(i=0; i<4; i++) kprintf("DBAT%1d: %08X %08X\n",
i, shadow_BAT.DBATs[i].upper, shadow_BAT.DBATs[i].lower);
#define PROCESSOR_VERSION_7400 12 /* ? */
#define PROCESSOR_VERSION_7410 0x800C /* ? */
#define PROCESSOR_VERSION_7450 0x8000 /* ? */
+#define PROCESSOR_VERSION_7455 0x8001 /* ? */
/*
* Interrupt and bootup stack for initial processor
; Specific processor initialization routines
;
-; 750CX
-
-init750CX:
- bf firstBoot, init750 ; No init for wakeup....
- mfspr r13,hid1 ; Get HID1
- li r14,lo16(0xFD5F) ; Get valid
- rlwinm r13,r13,4,28,31 ; Isolate
- slw r14,r14,r13 ; Position
- rlwimi r17,r14,15-pfCanNapb,pfCanNapb,pfCanNapb ; Set it
- b init750 ; Join common...
-
; 750
init750:
slw r14,r14,r15 ; Set 256KB, 512KB, or 1MB
beq- init750l2none ; Not a valid setting...
+ stw r13,pfl2crOriginal(r30) ; Shadow the L2CR
stw r13,pfl2cr(r30) ; Shadow the L2CR
stw r14,pfl2Size(r30) ; Set the L2 size
b init750l2done ; Done with L2
blr ; Return...
init750nb:
- lwz r11,pfHID0(r30) ; Get HID0
+ lwz r11,pfHID0(r30) ; Get HID0
sync
mtspr hid0,r11 ; Set the HID
isync
sync
blr
+; 750CX
+
+init750CX:
+ bf firstBoot, init750 ; No init for wakeup....
+ mfspr r13,hid1 ; Get HID1
+ li r14,lo16(0xFD5F) ; Get valid
+ rlwinm r13,r13,4,28,31 ; Isolate
+ slw r14,r14,r13 ; Position
+ rlwimi r17,r14,15-pfCanNapb,pfCanNapb,pfCanNapb ; Set it
+ b init750 ; Join common...
+
+
+; 7400
+
init7400: bf firstBoot,i7400nb ; Do different if not initial boot...
mfspr r13,l2cr ; Get the L2CR
rlwinm. r0,r13,0,l2e,l2e ; Any L2?
rlwinm r15,r15,4,30,31
slw r14,r14,r15 ; Set 256KB, 512KB, 1MB, or 2MB
+ stw r13,pfl2crOriginal(r30) ; Shadow the L2CR
stw r13,pfl2cr(r30) ; Shadow the L2CR
stw r14,pfl2Size(r30) ; Set the L2 size
blr ; Return...
i7400nb:
+ li r11,0
+ mtspr l2cr,r11 ; Make sure L2CR is zero
lwz r11,pfHID0(r30) ; Get HID0
sync
mtspr hid0,r11 ; Set the HID
init7410: li r13,0 ; Clear
mtspr 1016,r13 ; Turn off direct cache
b init7400 ; Join up with common....
-
-; 7450
-init7450: bf firstBoot,i7450nb ; Do different if not initial boot...
+
+; 745X - Any 7450 family processor
+
+init745X:
+ bf firstBoot,init745Xnb ; Do different if not initial boot...
mfspr r13,l2cr ; Get the L2CR
rlwinm. r0,r13,0,l2e,l2e ; Any L2?
- bne+ i7450hl2 ; Yes...
+ bne+ init745Xhl2 ; Yes...
rlwinm r17,r17,0,pfL2b+1,pfL2b-1 ; No L2, turn off feature
-i7450hl2: lis r14,hi16(256*1024) ; Base L2 size
+init745Xhl2: lis r14,hi16(256*1024) ; Base L2 size
rlwinm r15,r13,22,12,13 ; Convert to 256k, 512k, or 768k
add r14,r14,r15 ; Add in minimum
+ stw r13,pfl2crOriginal(r30) ; Shadow the L2CR
stw r13,pfl2cr(r30) ; Shadow the L2CR
stw r14,pfl2Size(r30) ; Set the L2 size
mfspr r13,l3cr ; Get the L3CR
rlwinm. r0,r13,0,l3e,l3e ; Any L3?
- bne+ i7450hl3 ; Yes...
+ bne+ init745Xhl3 ; Yes...
rlwinm r17,r17,0,pfL3b+1,pfL3b-1 ; No L3, turn off feature
-i7450hl3: cmplwi cr0,r13,0 ; No L3 if L3CR is zero
- beq- init7450none ; Go turn off the features...
+init745Xhl3: cmplwi cr0,r13,0 ; No L3 if L3CR is zero
+ beq- init745Xnone ; Go turn off the features...
lis r14,hi16(1024*1024) ; Base L3 size
rlwinm r15,r13,4,31,31 ; Get size multiplier
slw r14,r14,r15 ; Set 1 or 2MB
+ stw r13,pfl3crOriginal(r30) ; Shadow the L3CR
stw r13,pfl3cr(r30) ; Shadow the L3CR
stw r14,pfl3Size(r30) ; Set the L3 size
- b init7450fin ; Return....
+ b init745Xfin ; Return....
-init7450none:
+init745Xnone:
rlwinm r17,r17,0,pfL3fab+1,pfL3b-1 ; No 3rd level cache or assist
rlwinm r11,r17,pfWillNapb-pfCanNapb,pfCanNapb,pfCanNapb ; Set pfCanNap if pfWillNap is set
or r17,r17,r11
-init7450fin:
+init745Xfin:
rlwinm r17,r17,0,pfWillNapb+1,pfWillNapb-1 ; Make sure pfWillNap is not set
mfspr r11,hid0 ; Get the current HID0
stw r11,pfLDSTCR(r30) ; Save the LDSTCR value
mfspr r11,ldstdb ; Get the ldstdb register
stw r11,pfLDSTDB(r30) ; Save the LDSTDB value
+ mfspr r11,pir ; Get the pir register
+ stw r11,pfBootConfig(r30) ; Save the BootConfig value
blr ; Return....
-i7450nb: lwz r11,pfHID0(r30) ; Get HID0
+init745Xnb: lwz r11,pfHID0(r30) ; Get HID0
sync
mtspr hid0,r11 ; Set the HID
isync
sync
blr
+; 7450 - Specific
+
+init7450:
+ bf firstBoot, init745X ; Not boot, use standard init
+
+ mfspr r13, pir ; Get BootConfig from PIR
+ rlwinm. r14, r13, 0, 20, 23 ; Is the pdet value zero
+ bne init7450done ; No, done for now
+
+ ori r13, r13, 0x0400 ; Force pdet value to 4
+ mtspr pir, r13 ; Write back the BootConfig
+
+init7450done:
+ b init745X ; Continue with standard init
+
;
; Processor to feature table
.long 0xFFFFFF00 ; Just revisions 1.xx
.short PROCESSOR_VERSION_7450
.short 0x0100
- .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa | pfL3pdet
+ .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa
.long init7450
.long CPU_SUBTYPE_POWERPC_7450
.long 105
.long 0xFFFFFFFF ; Just revision 2.0
.short PROCESSOR_VERSION_7450
.short 0x0200
- .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa | pfL3pdet
+ .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa
.long init7450
.long CPU_SUBTYPE_POWERPC_7450
.long 105
.long 0xFFFF0000 ; All other revisions
.short PROCESSOR_VERSION_7450
.short 0
- .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfWillNap | pfNoMSRir | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa | pfL3pdet
+ .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfWillNap | pfNoMSRir | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa
.long init7450
.long CPU_SUBTYPE_POWERPC_7450
.long 105
.long 32*1024
.long 32*1024
+; 7455 (1.xx) Just like 7450 2.0
+
+ .align 2
+ .long 0xFFFFFF00 ; Just revisions 1.xx
+ .short PROCESSOR_VERSION_7455
+ .short 0x0100
+ .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa
+ .long init745X
+ .long CPU_SUBTYPE_POWERPC_7450
+ .long 105
+ .long 90
+ .long 32
+ .long 32*1024
+ .long 32*1024
+
+; 7455 (2.0)
+
+ .align 2
+ .long 0xFFFFFFFF ; Just revision 2.0
+ .short PROCESSOR_VERSION_7455
+ .short 0x0200
+ .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfWillNap | pfNoMSRir | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa
+ .long init745X
+ .long CPU_SUBTYPE_POWERPC_7450
+ .long 105
+ .long 90
+ .long 32
+ .long 32*1024
+ .long 32*1024
+
+; 7455 (2.1)
+
+ .align 2
+ .long 0xFFFF0000 ; All other revisions
+ .short PROCESSOR_VERSION_7455
+ .short 0
+ .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfNoMSRir | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa
+ .long init745X
+ .long CPU_SUBTYPE_POWERPC_7450
+ .long 105
+ .long 90
+ .long 32
+ .long 32*1024
+ .long 32*1024
; Default dumb loser machine
static upl_t
upl_create(
- boolean_t internal)
+ boolean_t internal,
+ vm_size_t size)
{
upl_t upl;
if(internal) {
upl = (upl_t)kalloc(sizeof(struct upl)
- + (sizeof(struct upl_page_info)*MAX_UPL_TRANSFER));
+ + (sizeof(struct upl_page_info)*(size/page_size)));
} else {
upl = (upl_t)kalloc(sizeof(struct upl));
}
if(upl->flags & UPL_INTERNAL) {
kfree((vm_offset_t)upl,
sizeof(struct upl) +
- (sizeof(struct upl_page_info) * MAX_UPL_TRANSFER));
+ (sizeof(struct upl_page_info) * (upl->size/page_size)));
} else {
kfree((vm_offset_t)upl, sizeof(struct upl));
}
}
if(upl_ptr) {
if(cntrl_flags & UPL_SET_INTERNAL) {
- upl = upl_create(TRUE);
+ upl = upl_create(TRUE, size);
user_page_list = (upl_page_info_t *)
(((vm_offset_t)upl) + sizeof(struct upl));
upl->flags |= UPL_INTERNAL;
} else {
- upl = upl_create(FALSE);
+ upl = upl_create(FALSE, size);
}
if(object->phys_contiguous) {
upl->size = size;