X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/316670eb35587141e969394ae8537d66b9211e80..04b8595b18b1b41ac7a206e4b3d51a635f8413d7:/iokit/Kernel/IODMACommand.cpp

diff --git a/iokit/Kernel/IODMACommand.cpp b/iokit/Kernel/IODMACommand.cpp
index dadc4dc35..3b3c0ee3a 100644
--- a/iokit/Kernel/IODMACommand.cpp
+++ b/iokit/Kernel/IODMACommand.cpp
@@ -30,6 +30,7 @@
 #include
 #include
+#include
 
 #include
 #include
@@ -41,7 +42,7 @@
 #include "IOKitKernelInternal.h"
 
 #define MAPTYPE(type)        ((UInt) (type) & kTypeMask)
-#define IS_MAPPED(type)      (MAPTYPE(type) == kMapped)
+#define IS_MAPPED(type)      (MAPTYPE(type) != kBypassed)
 #define IS_BYPASSED(type)    (MAPTYPE(type) == kBypassed)
 #define IS_NONCOHERENT(type) (MAPTYPE(type) == kNonCoherent)
 
@@ -147,6 +148,8 @@ IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                     IOMapper        *mapper,
                                     void            *refCon)
 {
+    IOService * device = 0;
+
     if (!super::init() || !outSegFunc)
         return false;
@@ -168,6 +171,12 @@ IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
 
     if (!maxTransferSize)
         maxTransferSize--;    // Set Max transfer to -1
+
+    if (mapper && !OSDynamicCast(IOMapper, mapper))
+    {
+        device = mapper;
+        mapper = 0;
+    }
     if (!mapper)
     {
         IOMapper::checkForSystemMapper();
@@ -190,7 +199,7 @@ IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
     switch (MAPTYPE(mappingOptions))
     {
     case kMapped:                   break;
-    case kNonCoherent: fMapper = 0; break;
+    case kNonCoherent: /*fMapper = 0;*/ break;
     case kBypassed:
         if (mapper && !mapper->getBypassMask(&fBypassMask))
             return false;
@@ -208,7 +217,8 @@ IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
     bzero(reserved, sizeof(IODMACommandInternal));
 
     fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
-
+    fInternalState->fDevice = device;
+
     return true;
 }
@@ -250,18 +260,15 @@ IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepar
     if (mem) {
         bzero(&fMDSummary, sizeof(fMDSummary));
-        err = mem->dmaCommandOperation(
-                kIOMDGetCharacteristics,
-                &fMDSummary, sizeof(fMDSummary));
+        err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
+                                       &fMDSummary, sizeof(fMDSummary));
         if (err)
             return err;
 
         ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;
 
         if ((kMapped == MAPTYPE(fMappingOptions))
-            && fMapper
-            && (!fNumAddressBits || (fNumAddressBits >= 31)))
-            // assuming mapped space is 2G
+            && fMapper)
             fInternalState->fCheckAddressing = false;
         else
             fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));
@@ -272,10 +279,10 @@ IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepar
         mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
 
         if (autoPrepare) {
-            err = prepare();
-            if (err) {
-                clearMemoryDescriptor();
-            }
+            err = prepare();
+            if (err) {
+                clearMemoryDescriptor();
+            }
         }
     }
@@ -321,7 +328,7 @@ IODMACommand::segmentOp(
 
     IODMACommandInternal * state = target->reserved;
 
-    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && !state->fLocalMapper)
+    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperPageAlloc || !target->fMapper))
         maxPhys = (1ULL << target->fNumAddressBits);
     else
         maxPhys = 0;
@@ -387,9 +394,16 @@ IODMACommand::segmentOp(
     {
         if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
         {
+            addr64_t cpuAddr = address;
             addr64_t remapAddr;
             uint64_t chunk;
 
+            if ((kMapped == MAPTYPE(target->fMappingOptions))
+                && target->fMapper)
+            {
+                cpuAddr = target->fMapper->mapAddr(address);
+            }
+
             remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
             if (!state->fDoubleBuffer)
             {
@@ -405,12 +419,12 @@ IODMACommand::segmentOp(
 
             if (kWalkSyncIn & op)
             {    // cppvNoModSnk
-                copypv(remapAddr, address, chunk,
+                copypv(remapAddr, cpuAddr, chunk,
                        cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
             }
             else
             {
-                copypv(address, remapAddr, chunk,
+                copypv(cpuAddr, remapAddr, chunk,
                        cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
             }
             address += chunk;
@@ -436,13 +450,11 @@ IODMACommand::walkAll(UInt8 op)
 
     if (kWalkPreflight & op)
     {
-        state->fMapContig     = false;
         state->fMisaligned    = false;
         state->fDoubleBuffer  = false;
        state->fPrepared      = false;
         state->fCopyNext      = NULL;
         state->fCopyPageAlloc = 0;
-        state->fLocalMapperPageAlloc = 0;
         state->fCopyPageCount = 0;
         state->fNextRemapPage = NULL;
         state->fCopyMD        = 0;
@@ -451,7 +463,7 @@ IODMACommand::walkAll(UInt8 op)
         {
             offset = 0;
             numSegments = 0-1;
-            ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
+            ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
         }
 
         op &= ~kWalkPreflight;
@@ -469,6 +481,9 @@ IODMACommand::walkAll(UInt8 op)
             if (!state->fDoubleBuffer)
             {
                 kern_return_t kr;
+
+                if (fMapper) panic("fMapper copying");
+
                 kr = vm_page_alloc_list(state->fCopyPageCount,
                                         KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
                 if (KERN_SUCCESS != kr)
@@ -484,7 +499,7 @@ IODMACommand::walkAll(UInt8 op)
             state->fCopyNext = state->fCopyPageAlloc;
             offset = 0;
             numSegments = 0-1;
-            ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
+            ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
             state->fPrepared = true;
             op &= ~(kWalkSyncIn | kWalkSyncOut);
         }
@@ -507,19 +522,6 @@ IODMACommand::walkAll(UInt8 op)
                 }
             }
         }
-
-        if (state->fLocalMapper)
-        {
-            state->fLocalMapperPageCount = atop_64(round_page(
-                    state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
-            state->fLocalMapperPageAlloc = fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount);
-            if (!state->fLocalMapperPageAlloc)
-            {
-                DEBG("IODMACommand !iovmAlloc");
-                return (kIOReturnNoResources);
-            }
-            state->fMapContig = true;
-        }
     }
 
     if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
@@ -533,7 +535,7 @@ IODMACommand::walkAll(UInt8 op)
             state->fCopyNext = state->fCopyPageAlloc;
             offset = 0;
             numSegments = 0-1;
-            ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
+            ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
         }
         else if (state->fCopyMD)
         {
@@ -564,12 +566,6 @@ IODMACommand::walkAll(UInt8 op)
 
     if (kWalkComplete & op)
     {
-        if (state->fLocalMapperPageAlloc)
-        {
-            fMapper->iovmFreeDMACommand(this, state->fLocalMapperPageAlloc, state->fLocalMapperPageCount);
-            state->fLocalMapperPageAlloc = 0;
-            state->fLocalMapperPageCount = 0;
-        }
         if (state->fCopyPageAlloc)
         {
             vm_page_free_list(state->fCopyPageAlloc, FALSE);
@@ -636,6 +632,11 @@ IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
 
     if (!maxTransferSize)
         maxTransferSize--;    // Set Max transfer to -1
+    if (mapper && !OSDynamicCast(IOMapper, mapper))
+    {
+        fInternalState->fDevice = mapper;
+        mapper = 0;
+    }
     if (!mapper)
     {
         IOMapper::checkForSystemMapper();
@@ -645,7 +646,7 @@ IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
     switch (MAPTYPE(mappingOptions))
     {
     case kMapped:                   break;
-    case kNonCoherent: fMapper = 0; break;
+    case kNonCoherent:              break;
     case kBypassed:
         if (mapper && !mapper->getBypassMask(&fBypassMask))
             return kIOReturnBadArgument;
@@ -721,15 +722,14 @@ IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchr
         state->fLocalMapper = (fMapper && (fMapper != IOMapper::gSystem));
 
         state->fSourceAlignMask = fAlignMask;
-        if (state->fLocalMapper)
+        if (fMapper)
             state->fSourceAlignMask &= page_mask;
 
         state->fCursor = state->fIterateOnly
                         || (!state->fCheckAddressing
-                            && !state->fLocalMapper
                             && (!state->fSourceAlignMask
                                 || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));
-
+
         if (!state->fCursor)
         {
             IOOptionBits op = kWalkPrepare | kWalkPreflight;
@@ -737,6 +737,45 @@ IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchr
                 op |= kWalkSyncOut;
             ret = walkAll(op);
         }
+
+        if (fMapper)
+        {
+            if (state->fLocalMapper)
+            {
+                state->fLocalMapperPageCount = atop_64(round_page(
+                        state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
+                state->fLocalMapperPageAlloc = ptoa_64(fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount));
+                if (!state->fLocalMapperPageAlloc)
+                {
+                    DEBG("IODMACommand !iovmAlloc");
+                    return (kIOReturnNoResources);
+                }
+                state->fMapContig = true;
+            }
+            else
+            {
+                IOMDDMAMapArgs mapArgs;
+                bzero(&mapArgs, sizeof(mapArgs));
+                mapArgs.fMapper = fMapper;
+                mapArgs.fMapSpec.device         = state->fDevice;
+                mapArgs.fMapSpec.alignment      = fAlignMask + 1;
+                mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
+                mapArgs.fOffset = state->fPreparedOffset;
+                mapArgs.fLength = state->fPreparedLength;
+                const IOMemoryDescriptor * md = state->fCopyMD;
+                if (!md) md = fMemory;
+                ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
+                if (kIOReturnSuccess == ret)
+                {
+                    state->fLocalMapperPageAlloc = mapArgs.fAlloc;
+                    state->fLocalMapperPageCount = mapArgs.fAllocCount;
+                    state->fMapContig = mapArgs.fMapContig;
+                }
+                ret = kIOReturnSuccess;
+            }
+        }
+
         if (kIOReturnSuccess == ret)
             state->fPrepared = true;
     }
@@ -761,6 +800,20 @@ IODMACommand::complete(bool invalidateCache, bool synchronize)
                 op |= kWalkSyncIn;
             ret = walkAll(op);
         }
+        if (state->fLocalMapperPageAlloc)
+        {
+            if (state->fLocalMapper)
+            {
+                fMapper->iovmFreeDMACommand(this, atop_64(state->fLocalMapperPageAlloc), state->fLocalMapperPageCount);
+            }
+            else if (state->fLocalMapperPageCount)
+            {
+                fMapper->iovmFree(atop_64(state->fLocalMapperPageAlloc), state->fLocalMapperPageCount);
+            }
+            state->fLocalMapperPageAlloc = 0;
+            state->fLocalMapperPageCount = 0;
+        }
+
         state->fPrepared = false;
 
         if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
@@ -947,7 +1000,7 @@ IODMACommand::genIOVMSegments(uint32_t op,
         return kIOReturnBadArgument;
 
     IOMDDMAWalkSegmentArgs *state =
-        (IOMDDMAWalkSegmentArgs *) fState;
+        (IOMDDMAWalkSegmentArgs *)(void *) fState;
 
     UInt64 offset    = *offsetP + internalState->fPreparedOffset;
     UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;
@@ -986,12 +1039,27 @@ IODMACommand::genIOVMSegments(uint32_t op,
     state->fOffset = offset;
     state->fLength = memLength - offset;
 
-    if (internalState->fMapContig && (kWalkClient & op))
+    if (internalState->fMapContig && internalState->fLocalMapperPageAlloc)
     {
-        ppnum_t pageNum = internalState->fLocalMapperPageAlloc;
-        state->fIOVMAddr = ptoa_64(pageNum)
-                            + offset - internalState->fPreparedOffset;
+        state->fIOVMAddr = internalState->fLocalMapperPageAlloc + offset;
         rtn = kIOReturnSuccess;
+#if 0
+        {
+            uint64_t checkOffset;
+            IOPhysicalLength segLen;
+            for (checkOffset = 0; checkOffset < state->fLength; )
+            {
+                addr64_t phys = const_cast(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone);
+                if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys)
+                {
+                    panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset,
+                            state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength,
+                            phys, checkOffset);
+                }
+                checkOffset += page_size - (phys & page_mask);
+            }
+        }
+#endif
     }
     else
     {
@@ -1150,7 +1218,7 @@ IODMACommand::clientOutputSegment(
 
     if (target->fNumAddressBits && (target->fNumAddressBits < 64)
         && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
-        && (target->reserved->fLocalMapperPageAlloc || !target->reserved->fLocalMapper))
+        && (target->reserved->fLocalMapperPageAlloc || !target->fMapper))
     {
         DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
         ret = kIOReturnMessageTooLarge;
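
Usage sketch (not part of the diff above). The change routes prepare() through the mapper: a system-mapper command now asks its memory descriptor for a mapping via kIOMDDMAMap (recording fLocalMapperPageAlloc/fLocalMapperPageCount), a local mapper still uses iovmAllocDMACommand(), and complete() releases whichever allocation was made; a driver may also pass its device (a non-IOMapper IOService) in the mapper slot, which is recorded as fDevice for the mapping spec. The sketch below shows the ordinary client flow those paths serve; the function name, the "buffer" argument, and the numeric limits (32 address bits, 64 KB segments) are illustrative assumptions, not values from the diff.

// Hypothetical kext-side sketch of the IODMACommand client flow.
#include <IOKit/IODMACommand.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

static IOReturn ExampleGenerateDMASegments(IOBufferMemoryDescriptor * buffer)
{
    // mapper == 0 selects the system mapper (the checkForSystemMapper() path above);
    // kMapped keeps the command going through the IOMapper.
    IODMACommand * cmd = IODMACommand::withSpecification(
        IODMACommand::OutputHost64,     // segment function: host-endian 64-bit segments
        32,                             // numAddressBits
        65536,                          // maxSegmentSize
        IODMACommand::kMapped,          // mappingOptions
        0,                              // maxTransferSize (0 = no limit)
        1,                              // alignment
        0,                              // mapper (0 = system mapper)
        0);                             // refCon
    if (!cmd)
        return kIOReturnNoMemory;

    // autoPrepare == true calls prepare(), which with this change obtains the DMA
    // mapping via kIOMDDMAMap (or iovmAllocDMACommand() for a local mapper).
    IOReturn ret = cmd->setMemoryDescriptor(buffer, true);
    if (kIOReturnSuccess == ret)
    {
        UInt64 offset = 0;
        while (offset < buffer->getLength())
        {
            IODMACommand::Segment64 seg;
            UInt32 numSeg = 1;
            ret = cmd->gen64IOVMSegments(&offset, &seg, &numSeg);
            if ((kIOReturnSuccess != ret) || !numSeg)
                break;
            // seg.fIOVMAddr / seg.fLength are the bus addresses to program into hardware.
        }
        cmd->complete();            // complete() now frees the mapper allocation
        cmd->clearMemoryDescriptor();
    }
    cmd->release();
    return ret;
}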