/*
- * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include <IOKit/assert.h>
#define IS_BYPASSED(type) (MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type) (MAPTYPE(type) == kNonCoherent)
-
-static bool gIOEnableCopyMapper = true;
-
enum
{
kWalkSyncIn = 0x01, // bounce -> md
kWalkClient = 0x80
};
-struct ExpansionData
-{
- IOMDDMAWalkSegmentState fState;
- IOMDDMACharacteristics fMDSummary;
-
- UInt64 fPreparedOffset;
- UInt64 fPreparedLength;
-
- UInt8 fCursor;
- UInt8 fCheckAddressing;
- UInt8 fIterateOnly;
- UInt8 fMisaligned;
- UInt8 fCopyContig;
- UInt8 fPrepared;
- UInt8 fDoubleBuffer;
- UInt8 __pad[1];
-
- ppnum_t fCopyPageAlloc;
- ppnum_t fCopyPageCount;
- addr64_t fCopyNext;
-
- class IOBufferMemoryDescriptor * fCopyMD;
-};
-typedef ExpansionData IODMACommandInternal;
#define fInternalState reserved
#define fState reserved->fState
#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);
-OSMetaClassDefineReservedUnused(IODMACommand, 0);
-OSMetaClassDefineReservedUnused(IODMACommand, 1);
-OSMetaClassDefineReservedUnused(IODMACommand, 2);
+OSMetaClassDefineReservedUsed(IODMACommand, 0);
+OSMetaClassDefineReservedUsed(IODMACommand, 1);
+OSMetaClassDefineReservedUsed(IODMACommand, 2);
OSMetaClassDefineReservedUnused(IODMACommand, 3);
OSMetaClassDefineReservedUnused(IODMACommand, 4);
OSMetaClassDefineReservedUnused(IODMACommand, 5);
return false;
};
- reserved = IONew(ExpansionData, 1);
+ if (fMapper)
+ fMapper->retain();
+
+ reserved = IONew(IODMACommandInternal, 1);
if (!reserved)
return false;
- bzero(reserved, sizeof(ExpansionData));
+ bzero(reserved, sizeof(IODMACommandInternal));
fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
IODMACommand::free()
{
if (reserved)
- IODelete(reserved, ExpansionData, 1);
+ IODelete(reserved, IODMACommandInternal, 1);
+
+ if (fMapper)
+ fMapper->release();
super::free();
}
else
fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));
+ fInternalState->fNewMD = true;
mem->retain();
fMemory = mem;
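+ // Flag the descriptor as the target of in-flight DMA; the matching
+ // kIOMDSetDMAInactive in clearMemoryDescriptor() below drops the flag.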
+ mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
if (autoPrepare)
return prepare();
};
if (fMemory) {
while (fActive)
complete();
+ fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
fMemory->release();
fMemory = 0;
}
void *segments,
UInt32 segmentIndex)
{
- IOOptionBits op = (IOOptionBits) reference;
+ IOOptionBits op = (uintptr_t) reference;
addr64_t maxPhys, address;
addr64_t remapAddr = 0;
uint64_t length;
IODMACommandInternal * state = target->reserved;
- if (target->fNumAddressBits && (target->fNumAddressBits < 64))
+ if (target->fNumAddressBits && (target->fNumAddressBits < 64) && !state->fLocalMapper)
maxPhys = (1ULL << target->fNumAddressBits);
else
maxPhys = 0;
if (!state->fMisaligned)
{
- state->fMisaligned |= (0 != (target->fAlignMask & address));
- if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, target->fAlignMask);
+ state->fMisaligned |= (0 != (state->fSourceAlignMask & address));
+ if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, state->fSourceAlignMask);
}
if (state->fMisaligned && (kWalkPreflight & op))
UInt32 numSegments;
UInt64 offset;
- if (gIOEnableCopyMapper && (kWalkPreflight & op))
+ if (kWalkPreflight & op)
{
- state->fCopyContig = false;
+ state->fMapContig = false;
state->fMisaligned = false;
state->fDoubleBuffer = false;
state->fPrepared = false;
state->fCopyNext = 0;
- state->fCopyPageAlloc = 0;
+ state->fCopyMapperPageAlloc = 0;
+ state->fLocalMapperPageAlloc = 0;
state->fCopyPageCount = 0;
+ state->fNextRemapIndex = 0;
state->fCopyMD = 0;
if (!(kWalkDoubleBuffer & op))
{
offset = 0;
numSegments = 0-1;
- ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
+ ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
}
op &= ~kWalkPreflight;
mapBase = mapper->iovmAlloc(state->fCopyPageCount);
if (mapBase)
{
- state->fCopyPageAlloc = mapBase;
- if (state->fCopyPageAlloc && state->fDoubleBuffer)
+ state->fCopyMapperPageAlloc = mapBase;
+ if (state->fCopyMapperPageAlloc && state->fDoubleBuffer)
{
DEBG("contig copy map\n");
- state->fCopyContig = true;
+ state->fMapContig = true;
}
- state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
+ state->fCopyNext = ptoa_64(state->fCopyMapperPageAlloc);
offset = 0;
numSegments = 0-1;
- ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
+ ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
state->fPrepared = true;
op &= ~(kWalkSyncIn | kWalkSyncOut);
}
{
DEBG("alloc IOBMD\n");
state->fCopyMD = IOBufferMemoryDescriptor::withOptions(
- fMDSummary.fDirection, state->fPreparedLength, page_size);
+ fMDSummary.fDirection, state->fPreparedLength, state->fSourceAlignMask);
if (state->fCopyMD)
{
}
}
}
+
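+ // A non-system (local) mapper preallocates one contiguous IOVM range covering
+ // the whole prepared transfer, so client walks can treat the mapping as
+ // contiguous (fMapContig) instead of remapping page by page.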
+ if (state->fLocalMapper)
+ {
+ state->fLocalMapperPageCount = atop_64(round_page(
+ state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
+ state->fLocalMapperPageAlloc = fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount);
+ state->fMapContig = true;
+ }
}
- if (gIOEnableCopyMapper && state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
+ if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
{
if (state->fCopyPageCount)
{
DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);
- if (state->fCopyPageAlloc)
+ if (state->fCopyMapperPageAlloc)
{
- state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
+ state->fCopyNext = ptoa_64(state->fCopyMapperPageAlloc);
offset = 0;
numSegments = 0-1;
- ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
+ ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
}
else if (state->fCopyMD)
{
if (kWalkComplete & op)
{
- if (state->fCopyPageAlloc)
+ if (state->fLocalMapperPageAlloc)
+ {
+ fMapper->iovmFreeDMACommand(this, state->fLocalMapperPageAlloc, state->fLocalMapperPageCount);
+ state->fLocalMapperPageAlloc = 0;
+ state->fLocalMapperPageCount = 0;
+ }
+ if (state->fCopyMapperPageAlloc)
{
- gIOCopyMapper->iovmFree(state->fCopyPageAlloc, state->fCopyPageCount);
- state->fCopyPageAlloc = 0;
+ gIOCopyMapper->iovmFree(state->fCopyMapperPageAlloc, state->fCopyPageCount);
+ state->fCopyMapperPageAlloc = 0;
state->fCopyPageCount = 0;
}
if (state->fCopyMD)
return (ret);
}
+UInt8
+IODMACommand::getNumAddressBits(void)
+{
+ return (fNumAddressBits);
+}
+
+UInt32
+IODMACommand::getAlignment(void)
+{
+ return (fAlignMask + 1);
+}
+
+IOReturn
+IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
+ UInt8 numAddressBits,
+ UInt64 maxSegmentSize,
+ MappingOptions mappingOptions,
+ UInt64 maxTransferSize,
+ UInt32 alignment,
+ IOMapper *mapper,
+ UInt64 offset,
+ UInt64 length,
+ bool flushCache,
+ bool synchronize)
+{
+ if (fActive)
+ return kIOReturnNotPermitted;
+
+ if (!outSegFunc || !numAddressBits)
+ return kIOReturnBadArgument;
+
+ bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
+ || OutputLittle32 == outSegFunc);
+ if (is32Bit && (numAddressBits > 32))
+ return kIOReturnBadArgument; // Wrong output function for bits
+
+ if (numAddressBits && (numAddressBits < PAGE_SHIFT))
+ return kIOReturnBadArgument;
+
+ if (!maxSegmentSize)
+ maxSegmentSize--; // Set Max segment to -1
+ if (!maxTransferSize)
+ maxTransferSize--; // Set Max transfer to -1
+
+ if (!mapper)
+ {
+ IOMapper::checkForSystemMapper();
+ mapper = IOMapper::gSystem;
+ }
+
+ fBypassMask = 0;
+
+ switch (MAPTYPE(mappingOptions))
+ {
+ case kMapped: break;
+ case kNonCoherent: mapper = 0; break; // noncoherent DMA does not use a mapper
+ case kBypassed:
+ if (mapper && !mapper->getBypassMask(&fBypassMask))
+ return kIOReturnBadArgument;
+ break;
+ default:
+ return kIOReturnBadArgument;
+ };
+
+ fNumSegments = 0;
+ fOutSeg = outSegFunc;
+ fNumAddressBits = numAddressBits;
+ fMaxSegmentSize = maxSegmentSize;
+ fMappingOptions = mappingOptions;
+ fMaxTransferSize = maxTransferSize;
+ if (!alignment)
+ alignment = 1;
+ fAlignMask = alignment - 1;
+ if (mapper != fMapper)
+ {
+ if (mapper)
+ mapper->retain();
+ if (fMapper)
+ fMapper->release();
+ fMapper = mapper;
+ }
+
+ fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
+
+ return prepare(offset, length, flushCache, synchronize);
+}
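+
+ /*
+ * Illustrative sketch only (not part of this change): a driver reusing one
+ * command for a device quirk via prepareWithSpecification(). The descriptor
+ * 'md' and the parameter values below are hypothetical.
+ *
+ * IODMACommand * cmd = IODMACommand::withSpecification(
+ * kIODMACommandOutputHost64, 32, 0);
+ * cmd->setMemoryDescriptor(md, false);
+ * IOReturn ret = cmd->prepareWithSpecification(
+ * kIODMACommandOutputHost64,
+ * 32, // device decodes 32 address bits
+ * 65536, // max segment size
+ * IODMACommand::kMapped,
+ * 0, // no max transfer size
+ * 4, // 4-byte alignment
+ * 0, // use the system mapper
+ * 0, 0, // prepare the whole descriptor
+ * true, true); // flushCache, synchronize
+ */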
+
+
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
IODMACommandInternal * state = fInternalState;
IOReturn ret = kIOReturnSuccess;
+ MappingOptions mappingOptions = fMappingOptions;
if (!length)
length = fMDSummary.fLength;
if (length > fMaxTransferSize)
return kIOReturnNoSpace;
-#if 0
if (IS_NONCOHERENT(mappingOptions) && flushCache) {
IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);
- poMD->performOperation(kIOMemoryIncoherentIOStore, 0, fMDSummary.fLength);
+ poMD->performOperation(kIOMemoryIncoherentIOStore, offset, length);
}
-#endif
if (fActive++)
{
if ((state->fPreparedOffset != offset)
state->fPreparedOffset = offset;
state->fPreparedLength = length;
- state->fCopyContig = false;
+ state->fMapContig = false;
state->fMisaligned = false;
state->fDoubleBuffer = false;
state->fPrepared = false;
state->fCopyNext = 0;
- state->fCopyPageAlloc = 0;
+ state->fCopyMapperPageAlloc = 0;
state->fCopyPageCount = 0;
+ state->fNextRemapIndex = 0;
state->fCopyMD = 0;
+ state->fLocalMapperPageAlloc = 0;
+ state->fLocalMapperPageCount = 0;
+
+ state->fLocalMapper = (fMapper && (fMapper != IOMapper::gSystem));
+ state->fSourceAlignMask = fAlignMask;
+ if (state->fLocalMapper)
+ state->fSourceAlignMask &= page_mask;
+
state->fCursor = state->fIterateOnly
|| (!state->fCheckAddressing
- && (!fAlignMask
- || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & fAlignMask)))));
+ && !state->fLocalMapper
+ && (!state->fSourceAlignMask
+ || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));
+
if (!state->fCursor)
{
IOOptionBits op = kWalkPrepare | kWalkPreflight;
{
if (!state->fCursor)
{
- IOOptionBits op = kWalkComplete;
- if (synchronize)
- op |= kWalkSyncIn;
- ret = walkAll(op);
+ IOOptionBits op = kWalkComplete;
+ if (synchronize)
+ op |= kWalkSyncIn;
+ ret = walkAll(op);
}
state->fPrepared = false;
-#if 0
if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
{
- // XXX gvdl: need invalidate before Chardonnay ships
IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);
- poMD->performOperation(kIOMemoryIncoherentIOInvalidate, 0, fMDSummary.fLength);
+ poMD->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
}
-#endif
}
return ret;
}
+IOReturn
+IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
+{
+ IODMACommandInternal * state = fInternalState;
+ if (fActive < 1)
+ return (kIOReturnNotReady);
+
+ if (offset)
+ *offset = state->fPreparedOffset;
+ if (length)
+ *length = state->fPreparedLength;
+
+ return (kIOReturnSuccess);
+}
+
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
return ret;
}
+struct IODMACommandTransferContext
+{
+ void * buffer;
+ UInt64 bufferOffset;
+ UInt64 remaining;
+ UInt32 op;
+};
+enum
+{
+ kIODMACommandTransferOpReadBytes = 1,
+ kIODMACommandTransferOpWriteBytes = 2
+};
+
+IOReturn
+IODMACommand::transferSegment(void *reference,
+ IODMACommand *target,
+ Segment64 segment,
+ void *segments,
+ UInt32 segmentIndex)
+{
+ IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
+ UInt64 length = min(segment.fLength, context->remaining);
+ addr64_t ioAddr = segment.fIOVMAddr;
+ addr64_t cpuAddr = ioAddr;
+
+ context->remaining -= length;
+
+ while (length)
+ {
+ UInt64 copyLen = length;
+ if ((kMapped == MAPTYPE(target->fMappingOptions))
+ && target->fMapper)
+ {
+ cpuAddr = target->fMapper->mapAddr(ioAddr);
+ copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
+ ioAddr += copyLen;
+ }
+
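+ // copypv() copies between a physical page and a kernel-virtual buffer:
+ // cppvPsrc/cppvPsnk mark which side is physical, cppvKmap addresses the
+ // virtual side through the kernel map, and cppvNoRefSrc/cppvNoModSnk leave
+ // the pages' referenced/modified bits untouched.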
+ switch (context->op)
+ {
+ case kIODMACommandTransferOpReadBytes:
+ copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
+ cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
+ break;
+ case kIODMACommandTransferOpWriteBytes:
+ copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
+ cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
+ break;
+ }
+ length -= copyLen;
+ context->bufferOffset += copyLen;
+ }
+
+ // A non-success return ends the segment walk early; reporting overrun once
+ // the context is drained is how a completed transfer stops iteration.
+ return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
+}
+
+UInt64
+IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
+{
+ IODMACommandInternal * state = fInternalState;
+ IODMACommandTransferContext context;
+ Segment64 segments[1];
+ UInt32 numSegments = 0-1; // i.e. UINT32_MAX: no limit on emitted segments
+
+ if (fActive < 1)
+ return (0);
+
+ if (offset >= state->fPreparedLength)
+ return (0);
+ length = min(length, state->fPreparedLength - offset);
+
+ context.buffer = buffer;
+ context.bufferOffset = 0;
+ context.remaining = length;
+ context.op = transferOp;
+ (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);
+
+ return (length - context.remaining);
+}
+
+UInt64
+IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
+{
+ return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
+}
+
+UInt64
+IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
+{
+ return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
+}
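+
+ /*
+ * Illustrative sketch only (not part of this change): readBytes()/writeBytes()
+ * move data through the prepared mapping, so a driver can patch or poll a
+ * prepared buffer in place. kStatusOffset and handleStatus() are hypothetical.
+ *
+ * UInt32 status;
+ * if (sizeof(status) == cmd->readBytes(kStatusOffset, &status, sizeof(status)))
+ * handleStatus(status);
+ */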
+
IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
void *segmentsP,
UInt32 *numSegmentsP)
{
- return (genIOVMSegments(clientOutputSegment, (void *) kWalkClient, offsetP, segmentsP, numSegmentsP));
+ return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
+ offsetP, segmentsP, numSegmentsP));
}
IOReturn
-IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
+IODMACommand::genIOVMSegments(uint32_t op,
+ InternalSegmentFunction outSegFunc,
void *reference,
UInt64 *offsetP,
void *segmentsP,
UInt32 *numSegmentsP)
{
- IOOptionBits op = (IOOptionBits) reference;
IODMACommandInternal * internalState = fInternalState;
IOOptionBits mdOp = kIOMDWalkSegments;
IOReturn ret = kIOReturnSuccess;
IOMDDMAWalkSegmentArgs *state =
(IOMDDMAWalkSegmentArgs *) fState;
- UInt64 offset = *offsetP + internalState->fPreparedOffset;
+ UInt64 offset = *offsetP + internalState->fPreparedOffset;
UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;
if (offset >= memLength)
return kIOReturnOverrun;
- if (!offset || offset != state->fOffset) {
- state->fOffset = 0;
- state->fIOVMAddr = 0;
- state->fMapped = (IS_MAPPED(fMappingOptions) && fMapper);
- mdOp = kIOMDFirstSegment;
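+ // Restart the internal walk whenever the caller rewinds to the start of the
+ // prepared range, seeks to a different offset, or has attached a new
+ // memory descriptor (fNewMD).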
+ if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
+ state->fOffset = 0;
+ state->fIOVMAddr = 0;
+ internalState->fNextRemapIndex = 0;
+ internalState->fNewMD = false;
+ state->fMapped = (IS_MAPPED(fMappingOptions) && fMapper);
+ mdOp = kIOMDFirstSegment;
};
UInt64 bypassMask = fBypassMask;
state->fOffset = offset;
state->fLength = memLength - offset;
- if (internalState->fCopyContig && (kWalkClient & op))
+ if (internalState->fMapContig && (kWalkClient & op))
{
- state->fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc)
+ ppnum_t pageNum = internalState->fLocalMapperPageAlloc;
+ if (!pageNum)
+ pageNum = internalState->fCopyMapperPageAlloc;
+ state->fIOVMAddr = ptoa_64(pageNum)
+ offset - internalState->fPreparedOffset;
rtn = kIOReturnSuccess;
}
else if (gIOCopyMapper)
{
DEBG("sparse switch %qx, %qx ", curSeg.fIOVMAddr, curSeg.fLength);
- // Cache this!
- for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
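+ // Fast path for the common sequential walk: the remap index cached from the
+ // previous segment usually matches, avoiding the linear search below.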
+ if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
+ ptoa_64(internalState->fCopyMapperPageAlloc + internalState->fNextRemapIndex)))
+ {
+ curSeg.fIOVMAddr = ptoa_64(internalState->fCopyMapperPageAlloc + internalState->fNextRemapIndex)
+ + (curSeg.fIOVMAddr & PAGE_MASK);
+ internalState->fNextRemapIndex += atop_64(round_page(curSeg.fLength));
+ }
+ else for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
{
if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
- ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)))
+ ptoa_64(internalState->fCopyMapperPageAlloc + checkRemapIndex)))
{
- curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex) + (curSeg.fIOVMAddr & PAGE_MASK);
+ curSeg.fIOVMAddr = ptoa_64(internalState->fCopyMapperPageAlloc + checkRemapIndex)
+ + (curSeg.fIOVMAddr & PAGE_MASK);
+ internalState->fNextRemapIndex = checkRemapIndex + atop_64(round_page(curSeg.fLength));
break;
}
}
}
if (internalState->fCursor
- && (0 != (fAlignMask & curSeg.fIOVMAddr)))
+ && (0 != (internalState->fSourceAlignMask & curSeg.fIOVMAddr)))
{
curSeg.fIOVMAddr = 0;
ret = kIOReturnNotAligned;
void *reference, IODMACommand *target,
Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
+ SegmentFunction segmentFunction = (SegmentFunction) reference;
IOReturn ret = kIOReturnSuccess;
if ((target->fNumAddressBits < 64)
- && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits))
+ && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
+ && (target->reserved->fLocalMapperPageAlloc || !target->reserved->fLocalMapper))
{
DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
ret = kIOReturnMessageTooLarge;
}
- if (!(*target->fOutSeg)(target, segment, vSegList, outSegIndex))
+ if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
{
DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
ret = kIOReturnMessageTooLarge;
return (ret);
}
+IOReturn
+IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
+ UInt64 *offsetP,
+ void *segmentsP,
+ UInt32 *numSegmentsP)
+{
+ return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
+ offsetP, segmentsP, numSegmentsP));
+}
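+
+ /*
+ * Illustrative sketch only (not part of this change): the SegmentFunction
+ * overload lets one prepared command emit a different segment layout per
+ * call, e.g. little-endian 32-bit descriptors, without re-preparing:
+ *
+ * UInt64 offset = 0;
+ * IODMACommand::Segment32 segs[8];
+ * UInt32 numSegs = 8;
+ * ret = cmd->genIOVMSegments(kIODMACommandOutputLittle32, &offset, segs, &numSegs);
+ */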
+
bool
IODMACommand::OutputHost32(IODMACommand *,
Segment64 segment, void *vSegList, UInt32 outSegIndex)