- if (IS_NONCOHERENT(mappingOptions) && flushCache)
- {
- if (state->fCopyMD)
- {
- state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
- }
- else
- {
- IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
- md->performOperation(kIOMemoryIncoherentIOStore, offset, length);
- }
- }
-
- if (fMapper)
- {
- IOMDDMAMapArgs mapArgs;
- bzero(&mapArgs, sizeof(mapArgs));
- mapArgs.fMapper = fMapper;
- mapArgs.fCommand = this;
- mapArgs.fMapSpec.device = state->fDevice;
- mapArgs.fMapSpec.alignment = fAlignMask + 1;
- mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
- mapArgs.fLength = state->fPreparedLength;
- const IOMemoryDescriptor * md = state->fCopyMD;
- if (md) { mapArgs.fOffset = 0; }
- else
- {
- md = fMemory;
- mapArgs.fOffset = state->fPreparedOffset;
- }
- ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
-//IOLog("dma %p 0x%x 0x%qx-0x%qx 0x%qx-0x%qx\n", this, ret, state->fPreparedOffset, state->fPreparedLength, mapArgs.fAlloc, mapArgs.fAllocLength);
-
- if (kIOReturnSuccess == ret)
- {
- state->fLocalMapperAlloc = mapArgs.fAlloc;
- state->fLocalMapperAllocLength = mapArgs.fAllocLength;
- state->fMapContig = mapArgs.fMapContig;
- }
- if (NULL != IOMapper::gSystem) ret = kIOReturnSuccess;
- }
- if (kIOReturnSuccess == ret) state->fPrepared = true;
- }
- return ret;
+ if (!length) {
+ length = fMDSummary.fLength;
+ }
+
+ if (length > fMaxTransferSize) {
+ return kIOReturnNoSpace;
+ }
+
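+ // prepare() can be nested; a nested call must cover exactly the range that is already prepared.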
+ if (fActive++) {
+ if ((state->fPreparedOffset != offset)
+ || (state->fPreparedLength != length)) {
+ ret = kIOReturnNotReady;
+ }
+ } else {
+ if (fAlignMaskLength & length) {
+ return kIOReturnNotAligned;
+ }
+
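+ // Guard against transfers whose page count would overflow 32-bit bookkeeping (page counts are presumably tracked in 32-bit fields); such requests fail with kIOReturnVMError.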
+ if (atop_64(length) > UINT_MAX) {
+ return kIOReturnVMError;
+ }
+ state->fPreparedOffset = offset;
+ state->fPreparedLength = length;
+
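+ // First preparation of this command: reset all per-transfer bookkeeping.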
+ state->fMisaligned = false;
+ state->fDoubleBuffer = false;
+ state->fPrepared = false;
+ state->fCopyNext = NULL;
+ state->fCopyPageAlloc = NULL;
+ state->fCopyPageCount = 0;
+ state->fNextRemapPage = NULL;
+ state->fCopyMD = NULL;
+ state->fLocalMapperAlloc = 0;
+ state->fLocalMapperAllocValid = false;
+ state->fLocalMapperAllocLength = 0;
+
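+ // With a mapper, only intra-page source alignment matters; page-level placement comes from the mapper's own allocation.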
+ state->fSourceAlignMask = fAlignMask;
+ if (fMapper) {
+ state->fSourceAlignMask &= page_mask;
+ }
+
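+ // The direct "cursor" path is usable when this is an iterate-only pass, or when addressing needs no checking and the descriptor's page alignment already satisfies the source alignment mask.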
+ state->fCursor = state->fIterateOnly
+ || (!state->fCheckAddressing
+ && (!state->fSourceAlignMask
+ || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));
+
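+ // Not eligible for the cursor path: do a preflight walk, which sets up bounce (copy) buffers where needed and, if synchronizing, copies data out to them.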
+ if (!state->fCursor) {
+ IOOptionBits op = kWalkPrepare | kWalkPreflight;
+ if (synchronize) {
+ op |= kWalkSyncOut;
+ }
+ ret = walkAll(op);
+ }
+
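+ // Non-coherent DMA: flush dirty CPU cache lines for the prepared range (or for the bounce copy) before the device accesses it.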
+ if (IS_NONCOHERENT(mappingOptions) && flushCache) {
+ if (state->fCopyMD) {
+ state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
+ } else {
+ fMemory->performOperation(kIOMemoryIncoherentIOStore, offset, length);
+ }
+ }
+
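+ // A mapper (system IOMMU or device mapper) is present: ask the descriptor to map the prepared range and return a device-visible (IOVA) allocation.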
+ if (fMapper) {
+ IOMDDMAMapArgs mapArgs;
+ bzero(&mapArgs, sizeof(mapArgs));
+ mapArgs.fMapper = fMapper.get();
+ mapArgs.fCommand = this;
+ mapArgs.fMapSpec.device = state->fDevice;
+ mapArgs.fMapSpec.alignment = fAlignMask + 1;
+ mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? ((UInt8) fNumAddressBits) : 64;
+ mapArgs.fLength = state->fPreparedLength;
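+ // Map whichever descriptor the device will actually access: the bounce copy (at offset 0) if one exists, otherwise the original memory at the prepared offset.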
+ OSSharedPtr<IOMemoryDescriptor> md = state->fCopyMD;
+ if (md) {
+ mapArgs.fOffset = 0;
+ } else {
+ md = fMemory;
+ mapArgs.fOffset = state->fPreparedOffset;
+ }
+
+ ret = md->dmaCommandOperation(kIOMDDMAMap, &mapArgs, sizeof(mapArgs));
+
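+ // If the mapper allocation is longer than the transfer, the memory is discontiguous at mapper-page granularity; build a per-run segment table, presumably so transfer offsets can later be translated into the allocation.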
+ if ((kIOReturnSuccess == ret)
+ && mapArgs.fAllocLength
+ && (mapArgs.fAllocLength != mapArgs.fLength)) {
+ do {
+ // multisegment case
+ IOMDDMAWalkSegmentState walkState;
+ IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
+ IOOptionBits mdOp;
+ uint64_t index;
+ IOPhysicalLength segLen;
+ uint32_t segCount;
+ uint64_t phys, align;
+ uint64_t mapperPageMask;
+ uint64_t mapperPageShift;
+ uint64_t insertOffset;
+ uint32_t mapOptions;
+ uint64_t length;
+
+ assert(mapArgs.fAllocLength > mapArgs.fLength);
+
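+ // The mapper's page size defines the granularity of the IOVA allocation; derive the page mask and shift from it.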
+ mapperPageMask = fMapper->getPageSize();
+ assert(mapperPageMask);
+ mapperPageMask -= 1;
+ mapperPageShift = (64 - __builtin_clzll(mapperPageMask));
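+ // First pass: walk the descriptor unmapped (physical addresses) just to count the physical runs.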
+ walkArgs->fMapped = false;
+ length = state->fPreparedLength;
+ mdOp = kIOMDFirstSegment;
+ segCount = 0;
+ for (index = 0; index < length; segCount++) {
+ walkArgs->fOffset = state->fPreparedOffset + index;
+
+ ret = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
+ mdOp = kIOMDWalkSegments;
+ assert(kIOReturnSuccess == ret);
+ if (ret != kIOReturnSuccess) {
+ panic("dmaCommandOperation");
+ }
+ segLen = walkArgs->fLength;
+ index += segLen;
+ }
+ if (ret != kIOReturnSuccess) {
+ break;
+ }
+
+#if defined(LOGTAG)
+ if (LOGTAG == fMemory->getTag()) {
+ IOLog("DMA[%p] alloc 0x%qx, 0x%qx\n", this, mapArgs.fAlloc, mapArgs.fAllocLength);
+ }
+#endif /* defined(LOGTAG) */
+
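+ // One zero-filled IODMACommandMapSegment per physical run counted above.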
+ state->fMapSegments = IONewZero(IODMACommandMapSegment, segCount);
+ if (!state->fMapSegments) {
+ ret = kIOReturnNoMemory;
+ break;
+ }
+ state->fMapSegmentsCount = segCount;
+
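+ // Convert the transfer direction into mapper access rights: the device reads memory for kIODirectionOut and writes it for kIODirectionIn.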
+ switch (kIODirectionOutIn & fMDSummary.fDirection) {
+ case kIODirectionOut:
+ mapOptions = kIODMAMapReadAccess;
+ break;
+ case kIODirectionIn:
+ mapOptions = kIODMAMapWriteAccess;
+ break;
+ default:
+ mapOptions = kIODMAMapReadAccess | kIODMAMapWriteAccess;
+ break;
+ }
+
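+ // Second pass: for each physical run record its offset in the descriptor (fDMAOffset), its page-aligned offset in the mapper allocation (fMapOffset), and its offset within the first mapper page (fPageOffset).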
+ mdOp = kIOMDFirstSegment;
+ segCount = 0;
+ for (insertOffset = 0, index = 0; index < length; segCount++) {
+ walkArgs->fOffset = state->fPreparedOffset + index;
+ ret = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
+ mdOp = kIOMDWalkSegments;
+ if (ret != kIOReturnSuccess) {
+ panic("dmaCommandOperation 0x%x", ret);
+ }
+ phys = walkArgs->fIOVMAddr;
+ segLen = walkArgs->fLength;
+
+#if defined(LOGTAG)
+ if (LOGTAG == fMemory->getTag()) {
+ IOLog("DMA[%p] phys[%d] 0x%qx, 0x%qx\n", this, segCount, (uint64_t) phys, (uint64_t) segLen);
+ }
+#endif /* defined(LOGTAG) */
+
+ align = (phys & mapperPageMask);
+
+#if defined(LOGTAG)
+ if (LOGTAG == fMemory->getTag()) {
+ IOLog("DMA[%p] runs[%d] dmaoff 0x%qx, mapoff 0x%qx, align 0x%qx\n", this, segCount, index, insertOffset, align);
+ }
+#endif /* defined(LOGTAG) */
+
+ assert(segCount < state->fMapSegmentsCount);
+ state->fMapSegments[segCount].fDMAOffset = state->fPreparedOffset + index;
+ state->fMapSegments[segCount].fMapOffset = insertOffset;
+ state->fMapSegments[segCount].fPageOffset = align;
+ index += segLen;
+
+ // segment page align: round the end of the run up to a mapper page boundary and measure from its page-aligned start, so insertOffset advances in whole mapper pages
+ segLen = ((phys + segLen + mapperPageMask) & ~mapperPageMask);
+ phys -= align;
+ segLen -= phys;
+ insertOffset += segLen;
+ }
+ state->fLocalMapperAllocBase = (mapArgs.fAlloc & ~mapperPageMask);
+#if defined(LOGTAG)
+ if (LOGTAG == fMemory->getTag()) {
+ IOLog("IODMACommand fMapSegmentsCount %d\n", state->fMapSegmentsCount);
+ }
+#endif /* defined(LOGTAG) */
+ } while (false);
+ }
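+ // Success: publish the mapper (IOVA) allocation and its length; fLocalMapperAllocValid marks that an IOVA range is in use for this preparation.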
+ if (kIOReturnSuccess == ret) {
+ state->fLocalMapperAlloc = mapArgs.fAlloc;
+ state->fLocalMapperAllocValid = true;
+ state->fLocalMapperAllocLength = mapArgs.fAllocLength;
+ }
+ }
+ if (kIOReturnSuccess == ret) {
+ state->fPrepared = true;
+ }
+ }
+ return ret;