#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>
+#include <libkern/OSDebug.h>
#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include "IOKitKernelInternal.h"
-#include "IOCopyMapper.h"
#define MAPTYPE(type) ((UInt) (type) & kTypeMask)
-#define IS_MAPPED(type) (MAPTYPE(type) == kMapped)
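+// Note: kNonCoherent now also counts as mapped; only kBypassed avoids the mapper.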
+#define IS_MAPPED(type) (MAPTYPE(type) != kBypassed)
#define IS_BYPASSED(type) (MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type) (MAPTYPE(type) == kNonCoherent)
-
-static bool gIOEnableCopyMapper = true;
-
enum
{
kWalkSyncIn = 0x01, // bounce -> md
#endif
#if 0
-#define DEBG(fmt, args...) { kprintf(fmt, ## args); }
+#define DEBG(fmt, args...) { IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...) {}
#endif
-
/**************************** class IODMACommand ***************************/
#undef super
-#define super OSObject
+#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);
OSMetaClassDefineReservedUsed(IODMACommand, 0);
OSMetaClassDefineReservedUsed(IODMACommand, 1);
-OSMetaClassDefineReservedUnused(IODMACommand, 2);
+OSMetaClassDefineReservedUsed(IODMACommand, 2);
OSMetaClassDefineReservedUnused(IODMACommand, 3);
OSMetaClassDefineReservedUnused(IODMACommand, 4);
OSMetaClassDefineReservedUnused(IODMACommand, 5);
IOMapper *mapper,
void *refCon)
{
- if (!super::init() || !outSegFunc || !numAddressBits)
+ IOService * device = 0;
+
+ if (!super::init() || !outSegFunc)
return false;
bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
|| OutputLittle32 == outSegFunc);
if (!maxTransferSize)
maxTransferSize--; // Set Max transfer to -1
+
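+ // The mapper argument is overloaded: a caller may pass its IOService
+ // here instead of an IOMapper; detect that and record it as the device.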
+ if (mapper && !OSDynamicCast(IOMapper, mapper))
+ {
+ device = mapper;
+ mapper = 0;
+ }
if (!mapper)
{
IOMapper::checkForSystemMapper();
switch (MAPTYPE(mappingOptions))
{
case kMapped: break;
- case kNonCoherent: fMapper = 0; break;
+ case kNonCoherent: /*fMapper = 0;*/ break;
case kBypassed:
if (mapper && !mapper->getBypassMask(&fBypassMask))
return false;
return false;
};
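+ // Take a reference on the mapper for the lifetime of the command
+ // (dropped in free()).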
+ if (fMapper)
+ fMapper->retain();
+
reserved = IONew(IODMACommandInternal, 1);
if (!reserved)
return false;
bzero(reserved, sizeof(IODMACommandInternal));
fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
-
+ fInternalState->fDevice = device;
+
return true;
}
if (reserved)
IODelete(reserved, IODMACommandInternal, 1);
+ if (fMapper)
+ fMapper->release();
+
super::free();
}
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
+ IOReturn err = kIOReturnSuccess;
+
if (mem == fMemory)
{
if (!autoPrepare)
if (fActive)
return kIOReturnBusy;
clearMemoryDescriptor();
- };
+ }
if (mem) {
bzero(&fMDSummary, sizeof(fMDSummary));
- IOReturn rtn = mem->dmaCommandOperation(
- kIOMDGetCharacteristics,
- &fMDSummary, sizeof(fMDSummary));
- if (rtn)
- return rtn;
+ err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
+ &fMDSummary, sizeof(fMDSummary));
+ if (err)
+ return err;
ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;
if ((kMapped == MAPTYPE(fMappingOptions))
- && fMapper
- && (!fNumAddressBits || (fNumAddressBits >= 31)))
- // assuming mapped space is 2G
+ && fMapper)
fInternalState->fCheckAddressing = false;
else
fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));
mem->retain();
fMemory = mem;
- if (autoPrepare)
- return prepare();
- };
-
- return kIOReturnSuccess;
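+ // Mark the descriptor as in use by a DMA command; balanced by
+ // kIOMDSetDMAInactive in clearMemoryDescriptor().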
+ mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
+ if (autoPrepare) {
+ err = prepare();
+ if (err) {
+ clearMemoryDescriptor();
+ }
+ }
+ }
+
+ return err;
}
IOReturn
if (fMemory) {
while (fActive)
complete();
+ fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
fMemory->release();
fMemory = 0;
}
void *segments,
UInt32 segmentIndex)
{
- IOOptionBits op = (IOOptionBits) reference;
+ IOOptionBits op = (uintptr_t) reference;
addr64_t maxPhys, address;
- addr64_t remapAddr = 0;
uint64_t length;
uint32_t numPages;
IODMACommandInternal * state = target->reserved;
- if (target->fNumAddressBits && (target->fNumAddressBits < 64))
+ if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperPageAlloc || !target->fMapper))
maxPhys = (1ULL << target->fNumAddressBits);
else
maxPhys = 0;
if (!state->fMisaligned)
{
- state->fMisaligned |= (0 != (target->fAlignMask & address));
- if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, target->fAlignMask);
+ state->fMisaligned |= (0 != (state->fSourceAlignMask & address));
+ if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, state->fSourceAlignMask);
}
if (state->fMisaligned && (kWalkPreflight & op))
if (!length)
return (kIOReturnSuccess);
- numPages = atop_64(round_page_64(length));
- remapAddr = state->fCopyNext;
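+ // The page count must cover from the start of the first page, so include
+ // the offset of "address" within its page before rounding up.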
+ numPages = atop_64(round_page_64((address & PAGE_MASK) + length));
if (kWalkPreflight & op)
{
}
else
{
+ vm_page_t lastPage;
+ lastPage = NULL;
if (kWalkPrepare & op)
{
+ lastPage = state->fCopyNext;
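+ // Tag each bounce page with the source page number it shadows;
+ // genIOVMSegments() finds it again later via vm_page_get_offset().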
for (IOItemCount idx = 0; idx < numPages; idx++)
- gIOCopyMapper->iovmInsert(atop_64(remapAddr), idx, atop_64(address) + idx);
- }
- if (state->fDoubleBuffer)
- state->fCopyNext += length;
- else
- {
- state->fCopyNext += round_page(length);
- remapAddr += (address & PAGE_MASK);
+ {
+ vm_page_set_offset(lastPage, atop_64(address) + idx);
+ lastPage = vm_page_get_next(lastPage);
+ }
}
- if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
+ if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
{
- DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
- (kWalkSyncIn & op) ? "->" : "<-",
- address, length, op);
- if (kWalkSyncIn & op)
- { // cppvNoModSnk
- copypv(remapAddr, address, length,
- cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
- }
- else
+ lastPage = state->fCopyNext;
+ for (IOItemCount idx = 0; idx < numPages; idx++)
{
- copypv(address, remapAddr, length,
- cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
+ if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
+ {
+ addr64_t cpuAddr = address;
+ addr64_t remapAddr;
+ uint64_t chunk;
+
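+ // Under an IOMMU, "address" is an IOVM address; translate it back to
+ // a physical address before copying with copypv().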
+ if ((kMapped == MAPTYPE(target->fMappingOptions))
+ && target->fMapper)
+ {
+ cpuAddr = target->fMapper->mapAddr(address);
+ }
+
+ remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
+ if (!state->fDoubleBuffer)
+ {
+ remapAddr += (address & PAGE_MASK);
+ }
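+ // Copy at most to the end of the current bounce page; the allocated
+ // page list need not be physically contiguous.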
+ chunk = PAGE_SIZE - (address & PAGE_MASK);
+ if (chunk > length)
+ chunk = length;
+
+ DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
+ (kWalkSyncIn & op) ? "->" : "<-",
+ address, chunk, op);
+
+ if (kWalkSyncIn & op)
+ { // cppvNoModSnk
+ copypv(remapAddr, cpuAddr, chunk,
+ cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
+ }
+ else
+ {
+ copypv(cpuAddr, remapAddr, chunk,
+ cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
+ }
+ address += chunk;
+ length -= chunk;
+ }
+ lastPage = vm_page_get_next(lastPage);
}
}
+ state->fCopyNext = lastPage;
}
return kIOReturnSuccess;
UInt32 numSegments;
UInt64 offset;
- if (gIOEnableCopyMapper && (kWalkPreflight & op))
+ if (kWalkPreflight & op)
{
- state->fCopyContig = false;
state->fMisaligned = false;
state->fDoubleBuffer = false;
state->fPrepared = false;
- state->fCopyNext = 0;
+ state->fCopyNext = NULL;
state->fCopyPageAlloc = 0;
state->fCopyPageCount = 0;
- state->fNextRemapIndex = 0;
- state->fCopyMD = 0;
+ state->fNextRemapPage = NULL;
+ state->fCopyMD = 0;
if (!(kWalkDoubleBuffer & op))
{
offset = 0;
numSegments = 0-1;
- ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
+ ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
}
op &= ~kWalkPreflight;
if (state->fCopyPageCount)
{
- IOMapper * mapper;
- ppnum_t mapBase = 0;
+ vm_page_t mapBase = NULL;
DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);
- mapper = gIOCopyMapper;
- if (mapper)
- mapBase = mapper->iovmAlloc(state->fCopyPageCount);
- if (mapBase)
+ if (!state->fDoubleBuffer)
{
- state->fCopyPageAlloc = mapBase;
- if (state->fCopyPageAlloc && state->fDoubleBuffer)
+ kern_return_t kr;
+
+ if (fMapper) panic("fMapper copying");
+
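+ // Allocate the bounce pages from low memory, without blocking.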
+ kr = vm_page_alloc_list(state->fCopyPageCount,
+ KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
+ if (KERN_SUCCESS != kr)
{
- DEBG("contig copy map\n");
- state->fCopyContig = true;
+ DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
+ mapBase = NULL;
}
+ }
- state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
+ if (mapBase)
+ {
+ state->fCopyPageAlloc = mapBase;
+ state->fCopyNext = state->fCopyPageAlloc;
offset = 0;
numSegments = 0-1;
- ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
+ ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
state->fPrepared = true;
op &= ~(kWalkSyncIn | kWalkSyncOut);
}
else
{
DEBG("alloc IOBMD\n");
- state->fCopyMD = IOBufferMemoryDescriptor::withOptions(
- fMDSummary.fDirection, state->fPreparedLength, page_size);
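+ // Fall back to a single buffer allocated under a physical address mask
+ // (double-buffer case, or if the page-list allocation failed).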
+ mach_vm_address_t mask = 0xFFFFF000; //state->fSourceAlignMask
+ state->fCopyMD = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
+ fMDSummary.fDirection, state->fPreparedLength, mask);
if (state->fCopyMD)
{
}
else
{
- DEBG("IODMACommand !iovmAlloc");
+ DEBG("IODMACommand !alloc IOBMD");
return (kIOReturnNoResources);
}
}
}
}
- if (gIOEnableCopyMapper && state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
+ if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
{
if (state->fCopyPageCount)
{
if (state->fCopyPageAlloc)
{
- state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
+ state->fCopyNext = state->fCopyPageAlloc;
offset = 0;
numSegments = 0-1;
- ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
+ ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
}
else if (state->fCopyMD)
{
{
if (state->fCopyPageAlloc)
{
- gIOCopyMapper->iovmFree(state->fCopyPageAlloc, state->fCopyPageCount);
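+ // Return the bounce pages taken during the preflight walk.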
+ vm_page_free_list(state->fCopyPageAlloc, FALSE);
state->fCopyPageAlloc = 0;
state->fCopyPageCount = 0;
}
return (ret);
}
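+// Accessors for the constraints this command was created with; getAlignment()
+// recovers the byte alignment from the stored mask.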
+UInt8
+IODMACommand::getNumAddressBits(void)
+{
+ return (fNumAddressBits);
+}
+
+UInt32
+IODMACommand::getAlignment(void)
+{
+ return (fAlignMask + 1);
+}
+
IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
UInt8 numAddressBits,
if (fActive)
return kIOReturnNotPermitted;
- if (!outSegFunc || !numAddressBits)
+ if (!outSegFunc)
return kIOReturnBadArgument;
bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
|| OutputLittle32 == outSegFunc);
if (!maxTransferSize)
maxTransferSize--; // Set Max transfer to -1
+ if (mapper && !OSDynamicCast(IOMapper, mapper))
+ {
+ fInternalState->fDevice = mapper;
+ mapper = 0;
+ }
if (!mapper)
{
IOMapper::checkForSystemMapper();
switch (MAPTYPE(mappingOptions))
{
case kMapped: break;
- case kNonCoherent: fMapper = 0; break;
+ case kNonCoherent: break;
case kBypassed:
if (mapper && !mapper->getBypassMask(&fBypassMask))
return kIOReturnBadArgument;
if (!alignment)
alignment = 1;
fAlignMask = alignment - 1;
- fMapper = mapper;
+ if (mapper != fMapper)
+ {
+ if (mapper)
+ mapper->retain();
+ if (fMapper)
+ fMapper->release();
+ fMapper = mapper;
+ }
fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
if (IS_NONCOHERENT(mappingOptions) && flushCache) {
IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);
- poMD->performOperation(kIOMemoryIncoherentIOStore, 0, fMDSummary.fLength);
+ poMD->performOperation(kIOMemoryIncoherentIOStore, offset, length);
}
if (fActive++)
{
state->fPreparedOffset = offset;
state->fPreparedLength = length;
- state->fCopyContig = false;
+ state->fMapContig = false;
state->fMisaligned = false;
state->fDoubleBuffer = false;
state->fPrepared = false;
- state->fCopyNext = 0;
+ state->fCopyNext = NULL;
state->fCopyPageAlloc = 0;
state->fCopyPageCount = 0;
- state->fNextRemapIndex = 0;
+ state->fNextRemapPage = NULL;
state->fCopyMD = 0;
+ state->fLocalMapperPageAlloc = 0;
+ state->fLocalMapperPageCount = 0;
+ state->fLocalMapper = (fMapper && (fMapper != IOMapper::gSystem));
+
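+ // With a mapper, only the sub-page bits of the alignment constrain the
+ // source; page-level placement is the mapper's job.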
+ state->fSourceAlignMask = fAlignMask;
+ if (fMapper)
+ state->fSourceAlignMask &= page_mask;
+
state->fCursor = state->fIterateOnly
|| (!state->fCheckAddressing
- && (!fAlignMask
- || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & fAlignMask)))));
+ && (!state->fSourceAlignMask
+ || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));
+
if (!state->fCursor)
{
IOOptionBits op = kWalkPrepare | kWalkPreflight;
op |= kWalkSyncOut;
ret = walkAll(op);
}
+
+ if (fMapper)
+ {
+ if (state->fLocalMapper)
+ {
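+ // Per-device mapper: reserve IOVM space covering the prepared length
+ // plus the transfer's starting offset within its first page.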
+ state->fLocalMapperPageCount = atop_64(round_page(
+ state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
+ state->fLocalMapperPageAlloc = ptoa_64(fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount));
+ if (!state->fLocalMapperPageAlloc)
+ {
+ DEBG("IODMACommand !iovmAlloc");
+ return (kIOReturnNoResources);
+ }
+ state->fMapContig = true;
+ }
+ else
+ {
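+ // System mapper: let the memory descriptor create (or reuse) the DMA
+ // mapping itself via kIOMDDMAMap.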
+ IOMDDMAMapArgs mapArgs;
+ bzero(&mapArgs, sizeof(mapArgs));
+ mapArgs.fMapper = fMapper;
+ mapArgs.fMapSpec.device = state->fDevice;
+ mapArgs.fMapSpec.alignment = fAlignMask + 1;
+ mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
+ mapArgs.fOffset = state->fPreparedOffset;
+ mapArgs.fLength = state->fPreparedLength;
+ const IOMemoryDescriptor * md = state->fCopyMD;
+ if (!md) md = fMemory;
+ ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
+ if (kIOReturnSuccess == ret)
+ {
+ state->fLocalMapperPageAlloc = mapArgs.fAlloc;
+ state->fLocalMapperPageCount = mapArgs.fAllocCount;
+ state->fMapContig = mapArgs.fMapContig;
+ }
+ ret = kIOReturnSuccess;
+ }
+ }
+
+
if (kIOReturnSuccess == ret)
state->fPrepared = true;
}
op |= kWalkSyncIn;
ret = walkAll(op);
}
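+ // Undo any IOVM allocation made during prepare().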
+ if (state->fLocalMapperPageAlloc)
+ {
+ if (state->fLocalMapper)
+ {
+ fMapper->iovmFreeDMACommand(this, atop_64(state->fLocalMapperPageAlloc), state->fLocalMapperPageCount);
+ }
+ else if (state->fLocalMapperPageCount)
+ {
+ fMapper->iovmFree(atop_64(state->fLocalMapperPageAlloc), state->fLocalMapperPageCount);
+ }
+ state->fLocalMapperPageAlloc = 0;
+ state->fLocalMapperPageCount = 0;
+ }
+
state->fPrepared = false;
if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
{
IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);
- poMD->performOperation(kIOMemoryIncoherentIOFlush, 0, fMDSummary.fLength);
+ poMD->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
}
}
return ret;
}
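+// Reports the range covered by the current prepare(); valid only while
+// the command is active.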
+IOReturn
+IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
+{
+ IODMACommandInternal * state = fInternalState;
+ if (fActive < 1)
+ return (kIOReturnNotReady);
+
+ if (offset)
+ *offset = state->fPreparedOffset;
+ if (length)
+ *length = state->fPreparedLength;
+
+ return (kIOReturnSuccess);
+}
+
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
void *segments,
UInt32 segmentIndex)
{
- IODMACommandTransferContext * context = (IODMACommandTransferContext *) segments;
+ IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
UInt64 length = min(segment.fLength, context->remaining);
addr64_t ioAddr = segment.fIOVMAddr;
addr64_t cpuAddr = ioAddr;
{
IODMACommandInternal * state = fInternalState;
IODMACommandTransferContext context;
+ Segment64 segments[1];
UInt32 numSegments = 0-1;
if (fActive < 1)
context.bufferOffset = 0;
context.remaining = length;
context.op = transferOp;
- (void) genIOVMSegments(transferSegment, (void *) kWalkClient, &offset, &context, &numSegments);
+ (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);
return (length - context.remaining);
}
void *segmentsP,
UInt32 *numSegmentsP)
{
- return (genIOVMSegments(clientOutputSegment, (void *) kWalkClient, offsetP, segmentsP, numSegmentsP));
+ return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
+ offsetP, segmentsP, numSegmentsP));
}
IOReturn
-IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
+IODMACommand::genIOVMSegments(uint32_t op,
+ InternalSegmentFunction outSegFunc,
void *reference,
UInt64 *offsetP,
void *segmentsP,
UInt32 *numSegmentsP)
{
- IOOptionBits op = (IOOptionBits) reference;
IODMACommandInternal * internalState = fInternalState;
IOOptionBits mdOp = kIOMDWalkSegments;
IOReturn ret = kIOReturnSuccess;
return kIOReturnBadArgument;
IOMDDMAWalkSegmentArgs *state =
- (IOMDDMAWalkSegmentArgs *) fState;
+ (IOMDDMAWalkSegmentArgs *)(void *) fState;
UInt64 offset = *offsetP + internalState->fPreparedOffset;
UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;
if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
state->fOffset = 0;
state->fIOVMAddr = 0;
- internalState->fNextRemapIndex = 0;
+ internalState->fNextRemapPage = NULL;
internalState->fNewMD = false;
state->fMapped = (IS_MAPPED(fMappingOptions) && fMapper);
mdOp = kIOMDFirstSegment;
maxPhys = 0;
maxPhys--;
- while ((state->fIOVMAddr) || state->fOffset < memLength)
+ while (state->fIOVMAddr || (state->fOffset < memLength))
{
- if (!state->fIOVMAddr) {
+ // state = next seg
+ if (!state->fIOVMAddr) {
IOReturn rtn;
state->fOffset = offset;
state->fLength = memLength - offset;
- if (internalState->fCopyContig && (kWalkClient & op))
+ if (internalState->fMapContig && internalState->fLocalMapperPageAlloc)
{
- state->fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc)
- + offset - internalState->fPreparedOffset;
+ state->fIOVMAddr = internalState->fLocalMapperPageAlloc + offset;
rtn = kIOReturnSuccess;
+#if 0
+ {
+ uint64_t checkOffset;
+ IOPhysicalLength segLen;
+ for (checkOffset = 0; checkOffset < state->fLength; )
+ {
+ addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone);
+ if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys)
+ {
+ panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset,
+ state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength,
+ phys, checkOffset);
+ }
+ checkOffset += page_size - (phys & page_mask);
+ }
+ }
+#endif
}
else
{
mdOp = kIOMDWalkSegments;
}
- if (rtn == kIOReturnSuccess) {
+ if (rtn == kIOReturnSuccess)
+ {
assert(state->fIOVMAddr);
assert(state->fLength);
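+ // Coalesce with the pending segment when the new chunk starts exactly
+ // where the pending one ends.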
+ if ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr) {
+ UInt64 length = state->fLength;
+ offset += length;
+ curSeg.fLength += length;
+ state->fIOVMAddr = 0;
+ }
}
else if (rtn == kIOReturnOverrun)
state->fIOVMAddr = state->fLength = 0; // At end
else
return rtn;
- };
-
- if (!curSeg.fIOVMAddr) {
- UInt64 length = state->fLength;
-
- offset += length;
- curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
- curSeg.fLength = length;
- state->fIOVMAddr = 0;
- }
- else if ((curSeg.fIOVMAddr + curSeg.fLength == state->fIOVMAddr)) {
- UInt64 length = state->fLength;
- offset += length;
- curSeg.fLength += length;
- state->fIOVMAddr = 0;
- };
-
+ }
+
+ // seg = state, offset = end of seg
+ if (!curSeg.fIOVMAddr)
+ {
+ UInt64 length = state->fLength;
+ offset += length;
+ curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
+ curSeg.fLength = length;
+ state->fIOVMAddr = 0;
+ }
if (!state->fIOVMAddr)
{
- if (kWalkClient & op)
+ if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
{
- if ((curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
+ if (internalState->fCursor)
+ {
+ curSeg.fIOVMAddr = 0;
+ ret = kIOReturnMessageTooLarge;
+ break;
+ }
+ else if (curSeg.fIOVMAddr <= maxPhys)
+ {
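+ // The segment crosses the addressing limit: emit the reachable part
+ // now and push the remainder back into the walk state.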
+ UInt64 remain, newLength;
+
+ newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
+ DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
+ remain = curSeg.fLength - newLength;
+ state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
+ curSeg.fLength = newLength;
+ state->fLength = remain;
+ offset -= remain;
+ }
+ else
{
- if (internalState->fCursor)
+ UInt64 addr = curSeg.fIOVMAddr;
+ ppnum_t addrPage = atop_64(addr);
+ vm_page_t remap = NULL;
+ UInt64 remain, newLength;
+
+ DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);
+
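+ // Find the bounce page shadowing this source page: try the cached next
+ // page first, otherwise search the list by the offset stored at prepare.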
+ remap = internalState->fNextRemapPage;
+ if (remap && (addrPage == vm_page_get_offset(remap)))
{
- curSeg.fIOVMAddr = 0;
- ret = kIOReturnMessageTooLarge;
- break;
}
- else if (curSeg.fIOVMAddr <= maxPhys)
+ else for (remap = internalState->fCopyPageAlloc;
+ remap && (addrPage != vm_page_get_offset(remap));
+ remap = vm_page_get_next(remap))
{
- UInt64 remain, newLength;
-
- newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
- DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
- remain = curSeg.fLength - newLength;
- state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
- curSeg.fLength = newLength;
- state->fLength = remain;
- offset -= remain;
}
- else if (gIOCopyMapper)
- {
- DEBG("sparse switch %qx, %qx ", curSeg.fIOVMAddr, curSeg.fLength);
- if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
- ptoa_64(internalState->fCopyPageAlloc + internalState->fNextRemapIndex)))
- {
- curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + internalState->fNextRemapIndex)
- + (curSeg.fIOVMAddr & PAGE_MASK);
- internalState->fNextRemapIndex += atop_64(round_page(curSeg.fLength));
- }
- else for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
- {
- if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
- ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)))
- {
- curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)
- + (curSeg.fIOVMAddr & PAGE_MASK);
- internalState->fNextRemapIndex = checkRemapIndex + atop_64(round_page(curSeg.fLength));
- break;
- }
- }
- DEBG("-> %qx, %qx\n", curSeg.fIOVMAddr, curSeg.fLength);
+ if (!remap) panic("no remap page found");
+
+ curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
+ + (addr & PAGE_MASK);
+ internalState->fNextRemapPage = vm_page_get_next(remap);
+
+ newLength = PAGE_SIZE - (addr & PAGE_MASK);
+ if (newLength < curSeg.fLength)
+ {
+ remain = curSeg.fLength - newLength;
+ state->fIOVMAddr = addr + newLength;
+ curSeg.fLength = newLength;
+ state->fLength = remain;
+ offset -= remain;
}
+ DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
}
}
}
if (internalState->fCursor
- && (0 != (fAlignMask & curSeg.fIOVMAddr)))
+ && (0 != (internalState->fSourceAlignMask & curSeg.fIOVMAddr)))
{
curSeg.fIOVMAddr = 0;
ret = kIOReturnNotAligned;
void *reference, IODMACommand *target,
Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
+ SegmentFunction segmentFunction = (SegmentFunction) reference;
IOReturn ret = kIOReturnSuccess;
- if ((target->fNumAddressBits < 64)
- && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits))
+ if (target->fNumAddressBits && (target->fNumAddressBits < 64)
+ && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
+ && (target->reserved->fLocalMapperPageAlloc || !target->fMapper))
{
DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
ret = kIOReturnMessageTooLarge;
}
- if (!(*target->fOutSeg)(target, segment, vSegList, outSegIndex))
+ if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
{
DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
ret = kIOReturnMessageTooLarge;
return (ret);
}
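+// Overload taking a caller-supplied output segment function for this call.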
+IOReturn
+IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
+ UInt64 *offsetP,
+ void *segmentsP,
+ UInt32 *numSegmentsP)
+{
+ return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
+ offsetP, segmentsP, numSegmentsP));
+}
+
bool
IODMACommand::OutputHost32(IODMACommand *,
Segment64 segment, void *vSegList, UInt32 outSegIndex)