+ // NOTE(review): diff fragment from the interior of a DMA segment-generation
+ // loop (appears to be IOKit's IODMACommand::genIOVMSegments — TODO confirm
+ // against the enclosing function, which is outside this view).
+ //
+ // Step 1: if the previous walk has not finished, ask the memory descriptor
+ // (the bounce/copy MD when one exists, else the original fMemory) to walk
+ // to the next physical run; later iterations reuse kIOMDWalkSegments.
+ if (!done) {
+ IOMemoryDescriptor * memory =
+ internalState->fCopyMD ? internalState->fCopyMD.get() : fMemory.get();
+ rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
+ mdOp = kIOMDWalkSegments;
+ }
+ // Disabled (#if 0) debug cross-check: re-translate each mapped DMA address
+ // through the IOMapper and panic on any mismatch with the descriptor's own
+ // physical translation. Skipped at interrupt context and for the mapper
+ // named "AppleNVMeMMU" (strcmp != 0 means the name differs, so the check
+ // runs for every other mapper).
+#if 0
+ if (check
+ && !ml_at_interrupt_context()
+ && (rtn == kIOReturnSuccess)
+ && fMapper
+ && strcmp("AppleNVMeMMU", fMapper->getName())) {
+ uint64_t checkOffset;
+ IOPhysicalLength segLen;
+ IOMemoryDescriptor * memory =
+ internalState->fCopyMD ? internalState->fCopyMD.get() : fMemory.get();
+ for (checkOffset = 0; checkOffset < state->fLength;) {
+ addr64_t phys = memory->getPhysicalSegment(offset + checkOffset, &segLen, kIOMemoryMapperNone);
+ addr64_t mapperPhys;
+
+ mapperPhys = fMapper->mapToPhysicalAddress(state->fIOVMAddr + checkOffset);
+ // Mapper translation is page-granular; merge in the sub-page offset
+ // bits from the descriptor's physical address before comparing.
+ mapperPhys |= (phys & (fMapper->getPageSize() - 1));
+ if (mapperPhys != phys) {
+ panic("DMA[%p] mismatch at offset %llx + %llx, dma %llx mapperPhys %llx != %llx, len %llx\n",
+ this, offset, checkOffset,
+ state->fIOVMAddr + checkOffset, mapperPhys, phys, state->fLength);
+ }
+ // Advance to the next page boundary of the physical address.
+ checkOffset += page_size - (phys & page_mask);
+ }
+ }
+#endif
+ // Walk result: on success the walker state holds a fresh {fIOVMAddr,
+ // fLength} pair. If it is contiguous with the segment being accumulated
+ // (curSeg), merge it and clear the walker state — fIOVMAddrValid back to
+ // 0 marks it consumed. kIOReturnOverrun means the transfer is complete;
+ // any other error is returned to the caller as-is.
+ if (rtn == kIOReturnSuccess) {
+ internalState->fIOVMAddrValid = true;
+ assert(state->fLength);
+ if (curSegValid && ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr)) {
+ UInt64 length = state->fLength;
+ offset += length;
+ curSeg.fLength += length;
+ internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
+ }
+ } else if (rtn == kIOReturnOverrun) {
+ internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end
+ } else {
+ return rtn;
+ }
+ }
+
+ // seg = state, offset = end of seg
+ // No segment accumulated yet: seed curSeg from the walker state and mark
+ // the walker state consumed.
+ if (!curSegValid) {
+ UInt64 length = state->fLength;
+ offset += length;
+ curSeg.fIOVMAddr = state->fIOVMAddr;
+ curSeg.fLength = length;
+ curSegValid = true;
+ internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
+ }
+
+ // Post-process the accumulated segment only when no unconsumed walker
+ // state remains. Each split below pushes the residue back into
+ // state->fIOVMAddr/fLength (and sets fIOVMAddrValid) so the next loop
+ // pass picks it up, and rewinds `offset` by the amount split off.
+ if (!internalState->fIOVMAddrValid) {
+ // maxPhys
+ // Client walks must stay below the physical ceiling. With a cursor
+ // (pre-validated) transfer that is fatal; otherwise truncate at
+ // maxPhys, or — if the whole segment starts above maxPhys — redirect
+ // this page to a remap page from fCopyPageAlloc (presumably
+ // pre-allocated below maxPhys elsewhere — TODO confirm).
+ if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys) {
+ if (internalState->fCursor) {
+ curSegValid = curSeg.fIOVMAddr = 0;
+ ret = kIOReturnMessageTooLarge;
+ break;
+ } else if (curSeg.fIOVMAddr <= maxPhys) {
+ UInt64 remain, newLength;
+
+ // Keep the below-maxPhys prefix; push the rest back to the walker.
+ newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
+ DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
+ remain = curSeg.fLength - newLength;
+ state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
+ internalState->fIOVMAddrValid = true;
+ curSeg.fLength = newLength;
+ state->fLength = remain;
+ offset -= remain;
+ } else {
+ UInt64 addr = curSeg.fIOVMAddr;
+ ppnum_t addrPage = (ppnum_t) atop_64(addr);
+ vm_page_t remap = NULL;
+ UInt64 remain, newLength;
+
+ DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);
+
+ // Fast path: the cached next remap page usually matches the next
+ // source page; otherwise search the fCopyPageAlloc list for the
+ // page whose recorded offset equals this source page number.
+ remap = internalState->fNextRemapPage;
+ if (remap && (addrPage == vm_page_get_offset(remap))) {
+ } else {
+ for (remap = internalState->fCopyPageAlloc;
+ remap && (addrPage != vm_page_get_offset(remap));
+ remap = vm_page_get_next(remap)) {
+ }
+ }
+
+ if (!remap) {
+ panic("no remap page found");
+ }
+
+ // Redirect the segment to the remap page, preserving the
+ // sub-page offset of the original address.
+ curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
+ + (addr & PAGE_MASK);
+ curSegValid = true;
+ internalState->fNextRemapPage = vm_page_get_next(remap);
+
+ // A remap covers at most one page: split off anything past the
+ // page boundary back into the walker state.
+ newLength = PAGE_SIZE - (addr & PAGE_MASK);
+ if (newLength < curSeg.fLength) {
+ remain = curSeg.fLength - newLength;
+ state->fIOVMAddr = addr + newLength;
+ internalState->fIOVMAddrValid = true;
+ curSeg.fLength = newLength;
+ state->fLength = remain;
+ offset -= remain;
+ }
+ DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
+ }
+ }
+
+ // reduce size of output segment
+ // `leftover` accumulates everything trimmed by the three caps below and
+ // is pushed back into the walker state in one shot afterwards.
+ uint64_t reduce, leftover = 0;
+
+ // fMaxSegmentSize
+ if (curSeg.fLength > fMaxSegmentSize) {
+ leftover += curSeg.fLength - fMaxSegmentSize;
+ curSeg.fLength = fMaxSegmentSize;
+ state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
+ internalState->fIOVMAddrValid = true;
+ }
+
+ // alignment current length
+
+ // Trim the segment length down to a multiple of the length alignment.
+ reduce = (curSeg.fLength & fAlignMaskLength);
+ if (reduce && (curSeg.fLength > reduce)) {
+ leftover += reduce;
+ curSeg.fLength -= reduce;
+ state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
+ internalState->fIOVMAddrValid = true;
+ }
+
+ // alignment next address
+
+ // Trim further so the pushed-back address honors the inter-segment
+ // address alignment.
+ reduce = (state->fIOVMAddr & fAlignMaskInternalSegments);
+ if (reduce && (curSeg.fLength > reduce)) {
+ leftover += reduce;
+ curSeg.fLength -= reduce;
+ state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
+ internalState->fIOVMAddrValid = true;
+ }
+
+ if (leftover) {
+ DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n",
+ leftover, offset,
+ curSeg.fIOVMAddr, curSeg.fLength);
+ state->fLength = leftover;
+ offset -= leftover;
+ }
+
+ //
+
+ // Cursor mode cannot split/copy, so alignment must already hold: the
+ // first segment is checked against the source alignment mask, later
+ // segments against the inter-segment mask, and all against the length
+ // mask.
+ // NOTE(review): the inner `if (misaligned)` below is redundant — it is
+ // always true inside the outer branch; the DEBG could be unconditional.
+ if (internalState->fCursor) {
+ bool misaligned;
+ uint32_t mask;
+
+ mask = (segIndex ? fAlignMaskInternalSegments : internalState->fSourceAlignMask);
+ misaligned = (0 != (mask & curSeg.fIOVMAddr));
+ if (!misaligned) {
+ mask = fAlignMaskLength;
+ misaligned |= (0 != (mask & curSeg.fLength));
+ }
+ if (misaligned) {
+ if (misaligned) {
+ DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength);
+ }
+ curSegValid = curSeg.fIOVMAddr = 0;
+ ret = kIOReturnNotAligned;
+ break;
+ }
+ }
+
+ // Ran past the end of the memory descriptor: trim the overshoot,
+ // clamp offset, clear the walker state, and leave the loop.
+ if (offset >= memLength) {
+ curSeg.fLength -= (offset - memLength);
+ offset = memLength;
+ internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end
+ break;
+ }