- IODMACommandInternal * state = fInternalState;
- IOReturn ret = kIOReturnSuccess;
- MappingOptions mappingOptions = fMappingOptions;
-
- if (!length)
- length = fMDSummary.fLength;
-
- if (length > fMaxTransferSize)
- return kIOReturnNoSpace;
-
- if (IS_NONCOHERENT(mappingOptions) && flushCache) {
- IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);
-
- poMD->performOperation(kIOMemoryIncoherentIOStore, offset, length);
- }
- if (fActive++)
- {
- if ((state->fPreparedOffset != offset)
- || (state->fPreparedLength != length))
- ret = kIOReturnNotReady;
- }
- else
- {
- state->fPreparedOffset = offset;
- state->fPreparedLength = length;
-
- state->fMapContig = false;
- state->fMisaligned = false;
- state->fDoubleBuffer = false;
- state->fPrepared = false;
- state->fCopyNext = NULL;
- state->fCopyPageAlloc = 0;
- state->fCopyPageCount = 0;
- state->fNextRemapPage = NULL;
- state->fCopyMD = 0;
- state->fLocalMapperPageAlloc = 0;
- state->fLocalMapperPageCount = 0;
-
- state->fLocalMapper = (fMapper && (fMapper != IOMapper::gSystem));
-
- state->fSourceAlignMask = fAlignMask;
- if (state->fLocalMapper)
- state->fSourceAlignMask &= page_mask;
-
- state->fCursor = state->fIterateOnly
- || (!state->fCheckAddressing
- && !state->fLocalMapper
- && (!state->fSourceAlignMask
- || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));
-
- if (!state->fCursor)
- {
- IOOptionBits op = kWalkPrepare | kWalkPreflight;
- if (synchronize)
- op |= kWalkSyncOut;
- ret = walkAll(op);
- }
- if (kIOReturnSuccess == ret)
- state->fPrepared = true;
- }
- return ret;
+ // Prepare path of the DMA command (appears to be IODMACommand::prepare from
+ // IOKit — TODO confirm against the enclosing file). Validates the request,
+ // initializes the per-prepare internal state on first activation, walks the
+ // memory, performs the non-coherent cache flush, and asks the mapper for an
+ // IOVM allocation. Returns kIOReturnSuccess or an IOReturn error.
+ IODMACommandInternal * state = fInternalState;
+ IOReturn ret = kIOReturnSuccess;
+ uint32_t mappingOptions = fMappingOptions;
+
+ // check specification has been set
+ // New guard vs. the old code: prepare() now fails fast if no output-segment
+ // function was configured (setSpecification not called yet).
+ if (!fOutSeg) {
+ return kIOReturnNotReady;
+ }
+
+ // Zero length means "the whole memory descriptor".
+ if (!length) {
+ length = fMDSummary.fLength;
+ }
+
+ if (length > fMaxTransferSize) {
+ return kIOReturnNoSpace;
+ }
+
+ // fActive is a prepare nesting count: a nested prepare() is only legal when
+ // it covers exactly the same (offset, length) window as the outstanding one.
+ if (fActive++) {
+ if ((state->fPreparedOffset != offset)
+ || (state->fPreparedLength != length)) {
+ ret = kIOReturnNotReady;
+ }
+ } else {
+ // Length must satisfy the command's configured alignment mask.
+ if (fAlignMaskLength & length) {
+ return kIOReturnNotAligned;
+ }
+
+ // NOTE(review): this reads state->fPreparedLength *before* it is
+ // assigned below, so it checks the value left over from the previous
+ // prepare (or zero). It looks like the intent was atop_64(length) —
+ // verify against upstream before relying on this overflow guard.
+ if (atop_64(state->fPreparedLength) > UINT_MAX) {
+ return kIOReturnVMError;
+ }
+ state->fPreparedOffset = offset;
+ state->fPreparedLength = length;
+
+ // Reset all per-prepare bookkeeping from any previous use of the command.
+ state->fMisaligned = false;
+ state->fDoubleBuffer = false;
+ state->fPrepared = false;
+ state->fCopyNext = NULL;
+ state->fCopyPageAlloc = NULL;
+ state->fCopyPageCount = 0;
+ state->fNextRemapPage = NULL;
+ state->fCopyMD = NULL;
+ state->fLocalMapperAlloc = 0;
+ state->fLocalMapperAllocValid = false;
+ state->fLocalMapperAllocLength = 0;
+
+ // Unlike the old code (which restricted this to local mappers), any
+ // mapper now forces at-most-page source alignment.
+ state->fSourceAlignMask = fAlignMask;
+ if (fMapper) {
+ state->fSourceAlignMask &= page_mask;
+ }
+
+ // fCursor == true means segments can be generated directly from the
+ // descriptor (no preflight copy/bounce needed): either we are only
+ // iterating, or no addressing check is required and the descriptor's
+ // page alignment already satisfies fSourceAlignMask. Bit 31 of
+ // fPageAlign presumably flags "alignment summary valid" — TODO confirm.
+ state->fCursor = state->fIterateOnly
+ || (!state->fCheckAddressing
+ && (!state->fSourceAlignMask
+ || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));
+
+ if (!state->fCursor) {
+ // Preflight walk; may set up fCopyMD/bounce buffers. kWalkSyncOut
+ // copies source data out now when the caller asked to synchronize.
+ IOOptionBits op = kWalkPrepare | kWalkPreflight;
+ if (synchronize) {
+ op |= kWalkSyncOut;
+ }
+ ret = walkAll(op);
+ }
+
+ // Cache maintenance for non-coherent mappings. Moved after walkAll
+ // (the old code flushed before) so that when a copy descriptor was
+ // created, the flush targets the buffer the hardware will actually see.
+ if (IS_NONCOHERENT(mappingOptions) && flushCache) {
+ if (state->fCopyMD) {
+ state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
+ } else {
+ fMemory->performOperation(kIOMemoryIncoherentIOStore, offset, length);
+ }
+ }
+
+ if (fMapper) {
+ // Ask the memory descriptor to map itself through the mapper
+ // (kIOMDDMAMap), yielding a contiguous IOVM allocation.
+ IOMDDMAMapArgs mapArgs;
+ bzero(&mapArgs, sizeof(mapArgs));
+ mapArgs.fMapper = fMapper.get();
+ mapArgs.fCommand = this;
+ mapArgs.fMapSpec.device = state->fDevice;
+ mapArgs.fMapSpec.alignment = fAlignMask + 1;
+ mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? ((UInt8) fNumAddressBits) : 64;
+ mapArgs.fLength = state->fPreparedLength;
+ // Map the copy (bounce) descriptor if walkAll made one, else the
+ // original memory at the prepared offset.
+ OSSharedPtr<IOMemoryDescriptor> md = state->fCopyMD;
+ if (md) {
+ mapArgs.fOffset = 0;
+ } else {
+ md = fMemory;
+ mapArgs.fOffset = state->fPreparedOffset;
+ }
+
+ ret = md->dmaCommandOperation(kIOMDDMAMap, &mapArgs, sizeof(mapArgs));
+
+ // fAllocLength != fLength means the mapping is not a single linear
+ // run: build a table of per-segment DMA→map offsets so later
+ // address translation can locate each piece.
+ if ((kIOReturnSuccess == ret)
+ && mapArgs.fAllocLength
+ && (mapArgs.fAllocLength != mapArgs.fLength)) {
+ do {
+ // multisegment case
+ IOMDDMAWalkSegmentState walkState;
+ IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
+ IOOptionBits mdOp;
+ uint64_t index;
+ IOPhysicalLength segLen;
+ uint32_t segCount;
+ uint64_t phys, align;
+ uint64_t mapperPageMask;
+ uint64_t mapperPageShift;
+ uint64_t insertOffset;
+ uint32_t mapOptions;
+ // NOTE(review): this local shadows the outer `length`
+ // parameter for the rest of the do/while.
+ uint64_t length;
+
+ assert(mapArgs.fAllocLength > mapArgs.fLength);
+
+ mapperPageMask = fMapper->getPageSize();
+ assert(mapperPageMask);
+ mapperPageMask -= 1;
+ // NOTE(review): mapperPageShift is computed but not used in
+ // this span — possibly dead, or used by code outside view.
+ mapperPageShift = (64 - __builtin_clzll(mapperPageMask));
+ walkArgs->fMapped = false;
+ length = state->fPreparedLength;
+ // First pass: walk the physical segments only to count them.
+ mdOp = kIOMDFirstSegment;
+ segCount = 0;
+ for (index = 0; index < length; segCount++) {
+ walkArgs->fOffset = state->fPreparedOffset + index;
+
+ ret = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
+ mdOp = kIOMDWalkSegments;
+ assert(kIOReturnSuccess == ret);
+ if (ret != kIOReturnSuccess) {
+ panic("dmaCommandOperation");
+ }
+ segLen = walkArgs->fLength;
+ index += segLen;
+ }
+ if (ret != kIOReturnSuccess) {
+ break;
+ }
+
+#if defined(LOGTAG)
+ if (LOGTAG == fMemory->getTag()) {
+ IOLog("DMA[%p] alloc 0x%qx, 0x%qx\n", this, mapArgs.fAlloc, mapArgs.fAllocLength);
+ }
+#endif /* defined(LOGTAG) */
+
+ state->fMapSegments = IONewZero(IODMACommandMapSegment, segCount);
+ if (!state->fMapSegments) {
+ ret = kIOReturnNoMemory;
+ break;
+ }
+ state->fMapSegmentsCount = segCount;
+
+ // Restrict IOMMU access rights to the transfer direction.
+ // NOTE(review): mapOptions is set here but not consumed in
+ // the visible span — confirm it is used downstream.
+ switch (kIODirectionOutIn & fMDSummary.fDirection) {
+ case kIODirectionOut:
+ mapOptions = kIODMAMapReadAccess;
+ break;
+ case kIODirectionIn:
+ mapOptions = kIODMAMapWriteAccess;
+ break;
+ default:
+ mapOptions = kIODMAMapReadAccess | kIODMAMapWriteAccess;
+ break;
+ }
+
+ // Second pass: record, for each physical segment, its DMA
+ // offset, its offset within the mapper allocation, and its
+ // sub-page offset.
+ mdOp = kIOMDFirstSegment;
+ segCount = 0;
+ for (insertOffset = 0, index = 0; index < length; segCount++) {
+ walkArgs->fOffset = state->fPreparedOffset + index;
+ ret = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
+ mdOp = kIOMDWalkSegments;
+ if (ret != kIOReturnSuccess) {
+ panic("dmaCommandOperation 0x%x", ret);
+ }
+ phys = walkArgs->fIOVMAddr;
+ segLen = walkArgs->fLength;
+
+#if defined(LOGTAG)
+ if (LOGTAG == fMemory->getTag()) {
+ IOLog("DMA[%p] phys[%d] 0x%qx, 0x%qx\n", this, segCount, (uint64_t) phys, (uint64_t) segLen);
+ }
+#endif /* defined(LOGTAG) */
+
+ align = (phys & mapperPageMask);
+
+#if defined(LOGTAG)
+ if (LOGTAG == fMemory->getTag()) {
+ IOLog("DMA[%p] runs[%d] dmaoff 0x%qx, mapoff 0x%qx, align 0x%qx\n", this, segCount, index, insertOffset, align);
+ }
+#endif /* defined(LOGTAG) */
+
+ assert(segCount < state->fMapSegmentsCount);
+ state->fMapSegments[segCount].fDMAOffset = state->fPreparedOffset + index;
+ state->fMapSegments[segCount].fMapOffset = insertOffset;
+ state->fMapSegments[segCount].fPageOffset = align;
+ index += segLen;
+
+ // segment page align
+ // Advance insertOffset by the segment length rounded out to
+ // whole mapper pages: [phys & ~mask, round_up(phys + segLen)).
+ segLen = ((phys + segLen + mapperPageMask) & ~mapperPageMask);
+ phys -= align;
+ segLen -= phys;
+ insertOffset += segLen;
+ }
+ // Base of the mapper allocation, truncated to a page boundary.
+ state->fLocalMapperAllocBase = (mapArgs.fAlloc & ~mapperPageMask);
+#if defined(LOGTAG)
+ if (LOGTAG == fMemory->getTag()) {
+ IOLog("IODMACommand fMapSegmentsCount %d\n", state->fMapSegmentsCount);
+ }
+#endif /* defined(LOGTAG) */
+ } while (false);
+ }
+ // Publish the mapper allocation only on full success; fLocal-
+ // MapperAllocValid gates later use/teardown of the allocation.
+ if (kIOReturnSuccess == ret) {
+ state->fLocalMapperAlloc = mapArgs.fAlloc;
+ state->fLocalMapperAllocValid = true;
+ state->fLocalMapperAllocLength = mapArgs.fAllocLength;
+ }
+ }
+ if (kIOReturnSuccess == ret) {
+ state->fPrepared = true;
+ }
+ }
+ return ret;