X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/4a3eedf9ecc9bbe3f3a5c6ce5e53ad199d639d32..5ba3f43ea354af8ad55bea84372a2bc834d8757c:/iokit/Kernel/IODMACommand.cpp

diff --git a/iokit/Kernel/IODMACommand.cpp b/iokit/Kernel/IODMACommand.cpp
index 75d751afe..5feadeb14 100644
--- a/iokit/Kernel/IODMACommand.cpp
+++ b/iokit/Kernel/IODMACommand.cpp
@@ -30,6 +30,7 @@
 #include <libkern/OSTypes.h>
 #include <libkern/OSByteOrder.h>
+#include <libkern/OSDebug.h>
 
 #include <IOKit/IOReturn.h>
 #include <IOKit/IOLib.h>
 
@@ -39,16 +40,10 @@
 #include <IOKit/IOMemoryDescriptor.h>
 #include <IOKit/IOBufferMemoryDescriptor.h>
 
 #include "IOKitKernelInternal.h"
-#include "IOCopyMapper.h"
 
 #define MAPTYPE(type)		((UInt) (type) & kTypeMask)
-#define IS_MAPPED(type)		(MAPTYPE(type) == kMapped)
-#define IS_BYPASSED(type)	(MAPTYPE(type) == kBypassed)
 #define IS_NONCOHERENT(type)	(MAPTYPE(type) == kNonCoherent)
-
-static bool gIOEnableCopyMapper = true;
-
 enum
 {
     kWalkSyncIn = 0x01,	// bounce -> md
@@ -80,25 +75,24 @@ enum
 #endif
 
 #if 0
-#define DEBG(fmt, args...)	{ kprintf(fmt, ## args); }
+#define DEBG(fmt, args...)	{ IOLog(fmt, ## args); kprintf(fmt, ## args); }
 #else
 #define DEBG(fmt, args...)	{}
 #endif
-
 /**************************** class IODMACommand ***************************/
 
 #undef super
-#define super OSObject
+#define super IOCommand
 OSDefineMetaClassAndStructors(IODMACommand, IOCommand);
 
 OSMetaClassDefineReservedUsed(IODMACommand,  0);
 OSMetaClassDefineReservedUsed(IODMACommand,  1);
-OSMetaClassDefineReservedUnused(IODMACommand,  2);
-OSMetaClassDefineReservedUnused(IODMACommand,  3);
-OSMetaClassDefineReservedUnused(IODMACommand,  4);
-OSMetaClassDefineReservedUnused(IODMACommand,  5);
-OSMetaClassDefineReservedUnused(IODMACommand,  6);
+OSMetaClassDefineReservedUsed(IODMACommand,  2);
+OSMetaClassDefineReservedUsed(IODMACommand,  3);
+OSMetaClassDefineReservedUsed(IODMACommand,  4);
+OSMetaClassDefineReservedUsed(IODMACommand,  5);
+OSMetaClassDefineReservedUsed(IODMACommand,  6);
 OSMetaClassDefineReservedUnused(IODMACommand,  7);
 OSMetaClassDefineReservedUnused(IODMACommand,  8);
 OSMetaClassDefineReservedUnused(IODMACommand,  9);
@@ -109,6 +103,39 @@ OSMetaClassDefineReservedUnused(IODMACommand, 13);
 OSMetaClassDefineReservedUnused(IODMACommand, 14);
 OSMetaClassDefineReservedUnused(IODMACommand, 15);
 
+IODMACommand *
+IODMACommand::withRefCon(void * refCon)
+{
+    IODMACommand * me = new IODMACommand;
+
+    if (me && !me->initWithRefCon(refCon))
+    {
+	me->release();
+	return 0;
+    }
+
+    return me;
+}
+
+IODMACommand *
+IODMACommand::withSpecification(SegmentFunction        outSegFunc,
+				const SegmentOptions * segmentOptions,
+				uint32_t               mappingOptions,
+				IOMapper             * mapper,
+				void                 * refCon)
+{
+    IODMACommand * me = new IODMACommand;
+
+    if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions,
+					 mapper, refCon))
+    {
+	me->release();
+	return 0;
+    }
+
+    return me;
+}
+
 IODMACommand *
 IODMACommand::withSpecification(SegmentFunction outSegFunc,
 			UInt8           numAddressBits,
@@ -128,7 +155,7 @@ IODMACommand::withSpecification(SegmentFunction outSegFunc,
     {
 	me->release();
 	return 0;
-    };
+    }
 
     return me;
 }
@@ -136,12 +163,54 @@ IODMACommand::withSpecification(SegmentFunction outSegFunc,
 IODMACommand *
 IODMACommand::cloneCommand(void *refCon)
 {
-    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
-		    fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
+    SegmentOptions segmentOptions =
+    {
+	.fStructSize                = sizeof(segmentOptions),
+	.fNumAddressBits            = (uint8_t)fNumAddressBits,
+	.fMaxSegmentSize            = fMaxSegmentSize,
+	.fMaxTransferSize           = fMaxTransferSize,
+	.fAlignment                 = fAlignMask + 1,
+	.fAlignmentLength           = fAlignMaskInternalSegments + 1,
+	.fAlignmentInternalSegments = fAlignMaskLength + 1
+    };
+
+    return (IODMACommand::withSpecification(fOutSeg, &segmentOptions,
+					    fMappingOptions, fMapper, refCon));
 }
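// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream diff): how a driver might use
// the SegmentOptions-based withSpecification() factory added above, assuming
// a device with no addressing or alignment constraints. MyDriverCreateCommand
// is hypothetical; the IODMACommand types and the kIODMACommandOutputHost64
// helper are as declared in IOKit/IODMACommand.h.
static IODMACommand *
MyDriverCreateCommand(void)
{
    IODMACommand::SegmentOptions options =
    {
	.fStructSize                = sizeof(options),
	.fNumAddressBits            = 64,	// device addresses all 64 bits
	.fMaxSegmentSize            = 0,	// 0 == unlimited
	.fMaxTransferSize           = 0,	// 0 == unlimited
	.fAlignment                 = 1,	// no segment address alignment
	.fAlignmentLength           = 1,	// no segment length alignment
	.fAlignmentInternalSegments = 1
    };

    // OutputHost64 emits Segment64 records in host byte order.
    return (IODMACommand::withSpecification(kIODMACommandOutputHost64, &options,
					    IODMACommand::kMapped,
					    NULL /* NULL selects the system mapper */,
					    NULL /* refCon */));
}
// ---------------------------------------------------------------------------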
 
 #define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
 
+bool
+IODMACommand::initWithRefCon(void * refCon)
+{
+    if (!super::init()) return (false);
+
+    if (!reserved)
+    {
+	reserved = IONew(IODMACommandInternal, 1);
+	if (!reserved) return false;
+    }
+    bzero(reserved, sizeof(IODMACommandInternal));
+    fRefCon = refCon;
+
+    return (true);
+}
+
+bool
+IODMACommand::initWithSpecification(SegmentFunction        outSegFunc,
+				    const SegmentOptions * segmentOptions,
+				    uint32_t               mappingOptions,
+				    IOMapper             * mapper,
+				    void                 * refCon)
+{
+    if (!initWithRefCon(refCon)) return false;
+
+    if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions,
+					     mappingOptions, mapper)) return false;
+
+    return (true);
+}
+
 bool
 IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
 			UInt8           numAddressBits,
@@ -152,73 +221,118 @@ IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
 			IOMapper       *mapper,
 			void           *refCon)
 {
-    if (!super::init() || !outSegFunc || !numAddressBits)
-        return false;
+    SegmentOptions segmentOptions =
+    {
+	.fStructSize                = sizeof(segmentOptions),
+	.fNumAddressBits            = numAddressBits,
+	.fMaxSegmentSize            = maxSegmentSize,
+	.fMaxTransferSize           = maxTransferSize,
+	.fAlignment                 = alignment,
+	.fAlignmentLength           = 1,
+	.fAlignmentInternalSegments = alignment
+    };
+
+    return (initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon));
+}
+
+IOReturn
+IODMACommand::setSpecification(SegmentFunction        outSegFunc,
+			       const SegmentOptions * segmentOptions,
+			       uint32_t               mappingOptions,
+			       IOMapper             * mapper)
+{
+    IOService * device = 0;
+    UInt8       numAddressBits;
+    UInt64      maxSegmentSize;
+    UInt64      maxTransferSize;
+    UInt32      alignment;
+
+    bool        is32Bit;
+
+    if (!outSegFunc || !segmentOptions) return (kIOReturnBadArgument);
 
-    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
-                 || OutputLittle32 == outSegFunc);
+    is32Bit = ((OutputHost32   == outSegFunc)
+	    || (OutputBig32    == outSegFunc)
+	    || (OutputLittle32 == outSegFunc));
+
+    numAddressBits  = segmentOptions->fNumAddressBits;
+    maxSegmentSize  = segmentOptions->fMaxSegmentSize;
+    maxTransferSize = segmentOptions->fMaxTransferSize;
+    alignment       = segmentOptions->fAlignment;
     if (is32Bit)
     {
 	if (!numAddressBits)
 	    numAddressBits = 32;
 	else if (numAddressBits > 32)
-	    return false;	// Wrong output function for bits
+	    return (kIOReturnBadArgument);	// Wrong output function for bits
     }
 
-    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
-	return false;
+    if (numAddressBits && (numAddressBits < PAGE_SHIFT)) return (kIOReturnBadArgument);
 
-    if (!maxSegmentSize)
-	maxSegmentSize--;	// Set Max segment to -1
-    if (!maxTransferSize)
-	maxTransferSize--;	// Set Max transfer to -1
+    if (!maxSegmentSize)  maxSegmentSize--;	// Set Max segment to -1
+    if (!maxTransferSize) maxTransferSize--;	// Set Max transfer to -1
 
-    if (!mapper)
+    if (mapper && !OSDynamicCast(IOMapper, mapper))
+    {
+	device = mapper;
+	mapper = 0;
+    }
+    if (!mapper && (kUnmapped != MAPTYPE(mappingOptions)))
     {
         IOMapper::checkForSystemMapper();
 	mapper = IOMapper::gSystem;
     }
 
     fNumSegments     = 0;
-    fBypassMask      = 0;
     fOutSeg	     = outSegFunc;
     fNumAddressBits  = numAddressBits;
     fMaxSegmentSize  = maxSegmentSize;
     fMappingOptions  = mappingOptions;
     fMaxTransferSize = maxTransferSize;
-    if (!alignment)
-	alignment = 1;
+    if (!alignment) alignment = 1;
     fAlignMask	     = alignment - 1;
-    fMapper = mapper;
-    fRefCon = refCon;
+
+    alignment = segmentOptions->fAlignmentLength;
+    if (!alignment) alignment = 1;
+    fAlignMaskLength = alignment - 1;
+
+    alignment = segmentOptions->fAlignmentInternalSegments;
+    if (!alignment) alignment = (fAlignMask + 1);
+    fAlignMaskInternalSegments = alignment - 1;
 
     switch (MAPTYPE(mappingOptions))
     {
-    case kMapped:                   break;
-    case kNonCoherent: fMapper = 0; break;
+    case kMapped:      break;
+    case kUnmapped:    break;
+    case kNonCoherent: break;
+
     case kBypassed:
-	if (mapper && !mapper->getBypassMask(&fBypassMask))
-	    return false;
-	break;
+	if (!mapper) break;
+	return (kIOReturnBadArgument);
+
     default:
-	return false;
+	return (kIOReturnBadArgument);
     };
 
-    reserved = IONew(IODMACommandInternal, 1);
-    if (!reserved)
-	return false;
-    bzero(reserved, sizeof(IODMACommandInternal));
+    if (mapper != fMapper)
+    {
+	if (mapper)  mapper->retain();
+	if (fMapper) fMapper->release();
+	fMapper = mapper;
+    }
 
     fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
-
-    return true;
+    fInternalState->fDevice = device;
+
+    return (kIOReturnSuccess);
 }
 
 void
 IODMACommand::free()
 {
-    if (reserved)
-	IODelete(reserved, IODMACommandInternal, 1);
+    if (reserved) IODelete(reserved, IODMACommandInternal, 1);
+
+    if (fMapper) fMapper->release();
 
     super::free();
 }
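// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream diff): setSpecification() above
// OSDynamicCasts its 'mapper' argument, so a caller may hand it the client
// device instead of an IOMapper; a non-IOMapper object is recorded as
// fInternalState->fDevice (used for the kIOMDDMAMap spec in prepare()) while
// the system mapper is substituted. 'provider' is a hypothetical IOService,
// e.g. a PCI nub; 'options' is a SegmentOptions as in the previous sketch.
IODMACommand * cmd =
    IODMACommand::withSpecification(kIODMACommandOutputHost64, &options,
				    IODMACommand::kMapped,
				    (IOMapper *) provider,	// device stands in for mapper
				    NULL /* refCon */);
// ---------------------------------------------------------------------------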
 
@@ -226,6 +340,8 @@ IODMACommand::free()
 IOReturn
 IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
 {
+    IOReturn err = kIOReturnSuccess;
+
     if (mem == fMemory)
     {
 	if (!autoPrepare)
@@ -243,22 +359,19 @@ IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepar
 	if (fActive)
 	    return kIOReturnBusy;
 	clearMemoryDescriptor();
-    };
+    }
 
     if (mem) {
 	bzero(&fMDSummary, sizeof(fMDSummary));
-	IOReturn rtn = mem->dmaCommandOperation(
-		kIOMDGetCharacteristics,
-		&fMDSummary, sizeof(fMDSummary));
-	if (rtn)
-	    return rtn;
+	err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
+				       &fMDSummary, sizeof(fMDSummary));
+	if (err)
+	    return err;
 
 	ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;
 
 	if ((kMapped == MAPTYPE(fMappingOptions))
-	    && fMapper
-	    && (!fNumAddressBits || (fNumAddressBits >= 31)))
-	    // assuming mapped space is 2G
+	    && fMapper)
 	    fInternalState->fCheckAddressing = false;
 	else
 	    fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));
@@ -266,23 +379,27 @@ IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepar
 	fInternalState->fNewMD = true;
 	mem->retain();
 	fMemory = mem;
-
-	if (autoPrepare)
-	    return prepare();
-    };
-
-    return kIOReturnSuccess;
+	if (!fMapper) mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
+	if (autoPrepare) {
+	    err = prepare();
+	    if (err) {
+		clearMemoryDescriptor();
+	    }
+	}
+    }
+
+    return err;
 }
 
 IOReturn
 IODMACommand::clearMemoryDescriptor(bool autoComplete)
 {
-    if (fActive && !autoComplete)
-	return (kIOReturnNotReady);
+    if (fActive && !autoComplete) return (kIOReturnNotReady);
 
-    if (fMemory) {
-	while (fActive)
-	    complete();
+    if (fMemory)
+    {
+	while (fActive) complete();
+	if (!fMapper) fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
 	fMemory->release();
 	fMemory = 0;
     }
@@ -296,6 +413,16 @@ IODMACommand::getMemoryDescriptor() const
     return fMemory;
 }
 
+IOMemoryDescriptor *
+IODMACommand::getIOMemoryDescriptor() const
+{
+    IOMemoryDescriptor * mem;
+
+    mem = reserved->fCopyMD;
+    if (!mem) mem = __IODEQUALIFY(IOMemoryDescriptor *, fMemory);
+
+    return (mem);
+}
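// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream diff): the descriptor lifecycle
// around setMemoryDescriptor()/clearMemoryDescriptor() shown above, with an
// explicit prepare()/complete() pair. 'cmd' is an IODMACommand created as in
// the earlier sketch and 'md' a hypothetical IOMemoryDescriptor for the I/O
// buffer.
IOReturn err = cmd->setMemoryDescriptor(md, false /* no autoPrepare */);
if (kIOReturnSuccess == err)
{
    err = cmd->prepare(0 /* offset */, 0 /* length 0 == whole descriptor */);
    if (kIOReturnSuccess == err)
    {
	// ... generate segments, program the hardware, wait for the I/O ...
	cmd->complete();
    }
    cmd->clearMemoryDescriptor();	// also drops the kIOMDSetDMAActive claim
}
// ---------------------------------------------------------------------------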
 
 IOReturn
 IODMACommand::segmentOp(
 			void         *reference,
 			IODMACommand *target,
 			Segment64     segment,
 			void         *segments,
 			UInt32        segmentIndex)
 {
-    IOOptionBits op = (IOOptionBits) reference;
+    IOOptionBits op = (uintptr_t) reference;
     addr64_t     maxPhys, address;
-    addr64_t     remapAddr = 0;
     uint64_t     length;
     uint32_t     numPages;
+    uint32_t     mask;
 
     IODMACommandInternal * state = target->reserved;
 
-    if (target->fNumAddressBits && (target->fNumAddressBits < 64))
+    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAllocValid || !target->fMapper))
 	maxPhys = (1ULL << target->fNumAddressBits);
     else
 	maxPhys = 0;
@@ -322,13 +449,19 @@ IODMACommand::segmentOp(
     address = segment.fIOVMAddr;
     length = segment.fLength;
 
-    assert(address);
     assert(length);
 
     if (!state->fMisaligned)
     {
-	state->fMisaligned |= (0 != (target->fAlignMask & address));
-	if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, target->fAlignMask);
+	mask = (segmentIndex ? target->fAlignMaskInternalSegments : state->fSourceAlignMask);
+	state->fMisaligned |= (0 != (mask & address));
+	if (state->fMisaligned) DEBG("misaligned address %qx:%qx, %x\n", address, length, mask);
+    }
+    if (!state->fMisaligned)
+    {
+	mask = target->fAlignMaskLength;
+	state->fMisaligned |= (0 != (mask & length));
+	if (state->fMisaligned) DEBG("misaligned length %qx:%qx, %x\n", address, length, mask);
     }
 
     if (state->fMisaligned && (kWalkPreflight & op))
@@ -352,8 +485,7 @@ IODMACommand::segmentOp(
     if (!length)
 	return (kIOReturnSuccess);
 
-    numPages = atop_64(round_page_64(length));
-    remapAddr = state->fCopyNext;
+    numPages = atop_64(round_page_64((address & PAGE_MASK) + length));
 
     if (kWalkPreflight & op)
     {
@@ -361,40 +493,78 @@ IODMACommand::segmentOp(
     }
     else
     {
+	vm_page_t lastPage;
+	lastPage = NULL;
 	if (kWalkPrepare & op)
 	{
+	    lastPage = state->fCopyNext;
 	    for (IOItemCount idx = 0; idx < numPages; idx++)
-		gIOCopyMapper->iovmInsert(atop_64(remapAddr), idx, atop_64(address) + idx);
-	}
-	if (state->fDoubleBuffer)
-	    state->fCopyNext += length;
-	else
-	{
-	    state->fCopyNext += round_page(length);
-	    remapAddr += (address & PAGE_MASK);
+	    {
+		vm_page_set_offset(lastPage, atop_64(address) + idx);
+		lastPage = vm_page_get_next(lastPage);
+	    }
 	}
 
-	if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
+	if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
 	{
-	    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
-		    (kWalkSyncIn & op) ? "->" : "<-",
-		address, length, op);
-	    if (kWalkSyncIn & op)
-	    { // cppvNoModSnk
-		copypv(remapAddr, address, length,
-				cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
-	    }
-	    else
+	    lastPage = state->fCopyNext;
+	    for (IOItemCount idx = 0; idx < numPages; idx++)
 	    {
-		copypv(address, remapAddr, length,
-				cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
+		if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
+		{
+		    addr64_t cpuAddr = address;
+		    addr64_t remapAddr;
+		    uint64_t chunk;
+
+		    if ((kMapped == MAPTYPE(target->fMappingOptions))
+			&& target->fMapper)
+		    {
+			cpuAddr = target->fMapper->mapToPhysicalAddress(address);
+		    }
+
+		    remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
+		    if (!state->fDoubleBuffer)
+		    {
+			remapAddr += (address & PAGE_MASK);
+		    }
+		    chunk = PAGE_SIZE - (address & PAGE_MASK);
+		    if (chunk > length)
+			chunk = length;
+
+		    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
+			 (kWalkSyncIn & op) ? "->" : "<-",
+			 address, chunk, op);
+
+		    if (kWalkSyncIn & op)
+		    { // cppvNoModSnk
+			copypv(remapAddr, cpuAddr, chunk,
+			       cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
+		    }
+		    else
+		    {
+			copypv(cpuAddr, remapAddr, chunk,
+			       cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
+		    }
+		    address += chunk;
+		    length -= chunk;
+		}
+		lastPage = vm_page_get_next(lastPage);
 	    }
 	}
+	state->fCopyNext = lastPage;
     }
 
     return kIOReturnSuccess;
 }
 
+IOBufferMemoryDescriptor *
+IODMACommand::createCopyBuffer(IODirection direction, UInt64 length)
+{
+    mach_vm_address_t mask = 0xFFFFF000;	//state->fSourceAlignMask
+    return (IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
+				direction, length, mask));
+}
+
 IOReturn
 IODMACommand::walkAll(UInt8 op)
 {
@@ -404,62 +574,66 @@ IODMACommand::walkAll(UInt8 op)
     UInt32  numSegments;
     UInt64  offset;
 
-    if (gIOEnableCopyMapper && (kWalkPreflight & op))
+    if (kWalkPreflight & op)
     {
-	state->fCopyContig     = false;
 	state->fMisaligned     = false;
 	state->fDoubleBuffer   = false;
 	state->fPrepared       = false;
-	state->fCopyNext       = 0;
+	state->fCopyNext       = NULL;
 	state->fCopyPageAlloc  = 0;
 	state->fCopyPageCount  = 0;
-	state->fNextRemapIndex = 0;
-	state->fCopyMD         = 0;
+	state->fNextRemapPage  = NULL;
+	state->fCopyMD         = 0;
 
 	if (!(kWalkDoubleBuffer & op))
 	{
	    offset = 0;
	    numSegments = 0-1;
-	    ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
+	    ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
 	}
 
 	op &= ~kWalkPreflight;
 
-	state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
+	state->fDoubleBuffer = (state->fMisaligned || state->fForceDoubleBuffer);
+	state->fForceDoubleBuffer = false;
 	if (state->fDoubleBuffer)
 	    state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));
 
 	if (state->fCopyPageCount)
 	{
-	    IOMapper * mapper;
-	    ppnum_t    mapBase = 0;
+	    vm_page_t mapBase = NULL;
 
 	    DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);
 
-	    mapper = gIOCopyMapper;
-	    if (mapper)
-		mapBase = mapper->iovmAlloc(state->fCopyPageCount);
-	    if (mapBase)
+	    if (!fMapper && !state->fDoubleBuffer)
 	    {
-		state->fCopyPageAlloc = mapBase;
-		if (state->fCopyPageAlloc && state->fDoubleBuffer)
+		kern_return_t kr;
+
+		if (fMapper) panic("fMapper copying");
+
+		kr = vm_page_alloc_list(state->fCopyPageCount,
+					KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
+		if (KERN_SUCCESS != kr)
 		{
-		    DEBG("contig copy map\n");
-		    state->fCopyContig = true;
+		    DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
+		    mapBase = NULL;
 		}
+	    }
 
-		state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
+	    if (mapBase)
+	    {
+		state->fCopyPageAlloc = mapBase;
+		state->fCopyNext = state->fCopyPageAlloc;
		offset = 0;
		numSegments = 0-1;
-		ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
+		ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
 		state->fPrepared = true;
 		op &= ~(kWalkSyncIn | kWalkSyncOut);
	    }
	    else
	    {
		DEBG("alloc IOBMD\n");
-		state->fCopyMD = IOBufferMemoryDescriptor::withOptions(
-				    fMDSummary.fDirection, state->fPreparedLength, page_size);
+		state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, state->fPreparedLength);
 
 		if (state->fCopyMD)
 		{
@@ -468,14 +642,14 @@ IODMACommand::walkAll(UInt8 op)
 		}
 		else
 		{
-		    DEBG("IODMACommand !iovmAlloc");
+		    DEBG("IODMACommand !alloc IOBMD");
 		    return (kIOReturnNoResources);
 		}
	    }
	}
    }
 
-    if (gIOEnableCopyMapper && state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
+    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
     {
 	if (state->fCopyPageCount)
 	{
@@ -483,10 +657,10 @@ IODMACommand::walkAll(UInt8 op)
 
 	    if (state->fCopyPageAlloc)
 	    {
-		state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
+		state->fCopyNext = state->fCopyPageAlloc;
		offset = 0;
		numSegments = 0-1;
-		ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
+		ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
	    }
	    else if (state->fCopyMD)
	    {
@@ -519,7 +693,7 @@ IODMACommand::walkAll(UInt8 op)
     {
 	if (state->fCopyPageAlloc)
 	{
-	    gIOCopyMapper->iovmFree(state->fCopyPageAlloc, state->fCopyPageCount);
+	    vm_page_free_list(state->fCopyPageAlloc, FALSE);
 	    state->fCopyPageAlloc = 0;
 	    state->fCopyPageCount = 0;
 	}
@@ -534,6 +708,52 @@ IODMACommand::walkAll(UInt8 op)
     return (ret);
 }
 
+UInt8
+IODMACommand::getNumAddressBits(void)
+{
+    return (fNumAddressBits);
+}
+
+UInt32
+IODMACommand::getAlignment(void)
+{
+    return (fAlignMask + 1);
+}
+
+uint32_t
+IODMACommand::getAlignmentLength(void)
+{
+    return (fAlignMaskLength + 1);
+}
+
+uint32_t
+IODMACommand::getAlignmentInternalSegments(void)
+{
+    return (fAlignMaskInternalSegments + 1);
+}
+
+IOReturn
+IODMACommand::prepareWithSpecification(SegmentFunction        outSegFunc,
+				       const SegmentOptions * segmentOptions,
+				       uint32_t               mappingOptions,
+				       IOMapper             * mapper,
+				       UInt64                 offset,
+				       UInt64                 length,
+				       bool                   flushCache,
+				       bool                   synchronize)
+{
+    IOReturn ret;
+
+    if (fActive) return kIOReturnNotPermitted;
+
+    ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper);
+    if (kIOReturnSuccess != ret) return (ret);
+
+    ret = prepare(offset, length, flushCache, synchronize);
+
+    return (ret);
+}
+
 IOReturn
 IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
 				       UInt8            numAddressBits,
@@ -547,84 +767,36 @@ IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
 				       bool             flushCache,
 				       bool             synchronize)
 {
-    if (fActive)
-        return kIOReturnNotPermitted;
-
-    if (!outSegFunc || !numAddressBits)
-        return kIOReturnBadArgument;
-
-    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
-                 || OutputLittle32 == outSegFunc);
-    if (is32Bit)
-    {
-	if (!numAddressBits)
-	    numAddressBits = 32;
-	else if (numAddressBits > 32)
-	    return kIOReturnBadArgument;	// Wrong output function for bits
-    }
-
-    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
-	return kIOReturnBadArgument;
-
-    if (!maxSegmentSize)
-	maxSegmentSize--;	// Set Max segment to -1
-    if (!maxTransferSize)
-	maxTransferSize--;	// Set Max transfer to -1
-
-    if (!mapper)
-    {
-        IOMapper::checkForSystemMapper();
-	mapper = IOMapper::gSystem;
-    }
-
-    switch (MAPTYPE(mappingOptions))
+    SegmentOptions segmentOptions =
     {
-    case kMapped:                   break;
-    case kNonCoherent: fMapper = 0; break;
-    case kBypassed:
-	if (mapper && !mapper->getBypassMask(&fBypassMask))
-	    return kIOReturnBadArgument;
-	break;
-    default:
-	return kIOReturnBadArgument;
+	.fStructSize                = sizeof(segmentOptions),
+	.fNumAddressBits            = numAddressBits,
+	.fMaxSegmentSize            = maxSegmentSize,
+	.fMaxTransferSize           = maxTransferSize,
+	.fAlignment                 = alignment,
+	.fAlignmentLength           = 1,
+	.fAlignmentInternalSegments = alignment
     };
 
-    fNumSegments     = 0;
-    fBypassMask      = 0;
-    fOutSeg	     = outSegFunc;
-    fNumAddressBits  = numAddressBits;
-    fMaxSegmentSize  = maxSegmentSize;
-    fMappingOptions  = mappingOptions;
-    fMaxTransferSize = maxTransferSize;
-    if (!alignment)
-	alignment = 1;
-    fAlignMask	     = alignment - 1;
-    fMapper = mapper;
-
-    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
-
-    return prepare(offset, length, flushCache, synchronize);
+    return (prepareWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper,
				     offset, length, flushCache, synchronize));
 }
 
 IOReturn
 IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
 {
-    IODMACommandInternal * state = fInternalState;
-    IOReturn               ret   = kIOReturnSuccess;
-    MappingOptions mappingOptions    = fMappingOptions;
+    IODMACommandInternal * state = fInternalState;
+    IOReturn               ret   = kIOReturnSuccess;
+    uint32_t               mappingOptions = fMappingOptions;
 
-    if (!length)
-	length = fMDSummary.fLength;
+    // check specification has been set
+    if (!fOutSeg) return (kIOReturnNotReady);
 
-    if (length > fMaxTransferSize)
-	return kIOReturnNoSpace;
+    if (!length) length = fMDSummary.fLength;
 
-    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
-	IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);
+    if (length > fMaxTransferSize) return kIOReturnNoSpace;
 
-	poMD->performOperation(kIOMemoryIncoherentIOStore, 0, fMDSummary.fLength);
-    }
     if (fActive++)
     {
 	if ((state->fPreparedOffset != offset)
@@ -633,23 +805,35 @@ IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchr
     }
     else
     {
+	if (fAlignMaskLength & length) return (kIOReturnNotAligned);
+
 	state->fPreparedOffset = offset;
 	state->fPreparedLength = length;
 
-	state->fCopyContig     = false;
+	state->fMapContig      = false;
 	state->fMisaligned     = false;
 	state->fDoubleBuffer   = false;
 	state->fPrepared       = false;
-	state->fCopyNext       = 0;
+	state->fCopyNext       = NULL;
 	state->fCopyPageAlloc  = 0;
 	state->fCopyPageCount  = 0;
-	state->fNextRemapIndex = 0;
+	state->fNextRemapPage  = NULL;
 	state->fCopyMD         = 0;
+	state->fLocalMapperAlloc       = 0;
+	state->fLocalMapperAllocValid  = false;
+	state->fLocalMapperAllocLength = 0;
+
+	state->fLocalMapper    = (fMapper && (fMapper != IOMapper::gSystem));
+
+	state->fSourceAlignMask = fAlignMask;
+	if (fMapper)
+	    state->fSourceAlignMask &= page_mask;
 
 	state->fCursor = state->fIterateOnly
 			|| (!state->fCheckAddressing
-			    && (!fAlignMask
-				|| ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & fAlignMask)))));
+			    && (!state->fSourceAlignMask
+				|| ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));
+
 	if (!state->fCursor)
 	{
 	    IOOptionBits op = kWalkPrepare | kWalkPreflight;
@@ -657,8 +841,49 @@ IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchr
 		op |= kWalkSyncOut;
 	    ret = walkAll(op);
 	}
-	if (kIOReturnSuccess == ret)
-	    state->fPrepared = true;
+
+	if (IS_NONCOHERENT(mappingOptions) && flushCache)
+	{
+	    if (state->fCopyMD)
+	    {
+		state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
+	    }
+	    else
+	    {
+		IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
+		md->performOperation(kIOMemoryIncoherentIOStore, offset, length);
+	    }
+	}
+
+	if (fMapper)
+	{
+	    IOMDDMAMapArgs mapArgs;
+	    bzero(&mapArgs, sizeof(mapArgs));
+	    mapArgs.fMapper = fMapper;
+	    mapArgs.fCommand = this;
+	    mapArgs.fMapSpec.device         = state->fDevice;
+	    mapArgs.fMapSpec.alignment      = fAlignMask + 1;
+	    mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
+	    mapArgs.fLength = state->fPreparedLength;
+	    const IOMemoryDescriptor * md = state->fCopyMD;
+	    if (md) { mapArgs.fOffset = 0; } else
+	    {
+		md = fMemory;
+		mapArgs.fOffset = state->fPreparedOffset;
+	    }
+	    ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
+//IOLog("dma %p 0x%x 0x%qx-0x%qx 0x%qx-0x%qx\n", this, ret, state->fPreparedOffset, state->fPreparedLength, mapArgs.fAlloc, mapArgs.fAllocLength);
+
+	    if (kIOReturnSuccess == ret)
+	    {
+		state->fLocalMapperAlloc       = mapArgs.fAlloc;
+		state->fLocalMapperAllocValid  = true;
+		state->fLocalMapperAllocLength = mapArgs.fAllocLength;
+		state->fMapContig = mapArgs.fMapContig;
+	    }
+	    if (NULL != IOMapper::gSystem) ret = kIOReturnSuccess;
+	}
+	if (kIOReturnSuccess == ret) state->fPrepared = true;
     }
     return ret;
 }
@@ -668,12 +893,29 @@ IODMACommand::complete(bool invalidateCache, bool synchronize)
 {
     IODMACommandInternal * state = fInternalState;
     IOReturn               ret   = kIOReturnSuccess;
+    IOMemoryDescriptor   * copyMD;
 
     if (fActive < 1)
	return kIOReturnNotReady;
 
     if (!--fActive)
     {
+	copyMD = state->fCopyMD;
+	if (copyMD) copyMD->retain();
+
+	if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
+	{
+	    if (copyMD)
+	    {
+		copyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength);
+	    }
+	    else
+	    {
+		IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
+		md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
+	    }
+	}
+
 	if (!state->fCursor)
 	{
	    IOOptionBits op = kWalkComplete;
@@ -681,19 +923,51 @@ IODMACommand::complete(bool invalidateCache, bool synchronize)
 		op |= kWalkSyncIn;
 	    ret = walkAll(op);
 	}
-	state->fPrepared = false;
 
-	if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
-	{
-	    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);
+	if (state->fLocalMapperAllocValid)
+	{
+	    IOMDDMAMapArgs mapArgs;
+	    bzero(&mapArgs, sizeof(mapArgs));
+	    mapArgs.fMapper = fMapper;
+	    mapArgs.fCommand = this;
+	    mapArgs.fAlloc = state->fLocalMapperAlloc;
+	    mapArgs.fAllocLength = state->fLocalMapperAllocLength;
+	    const IOMemoryDescriptor * md = copyMD;
+	    if (md) { mapArgs.fOffset = 0; }
+	    else
+	    {
+		md = fMemory;
+		mapArgs.fOffset = state->fPreparedOffset;
+	    }
+
+	    ret = md->dmaCommandOperation(kIOMDDMAUnmap, &mapArgs, sizeof(mapArgs));
 
-	    poMD->performOperation(kIOMemoryIncoherentIOFlush, 0, fMDSummary.fLength);
+	    state->fLocalMapperAlloc       = 0;
+	    state->fLocalMapperAllocValid  = false;
+	    state->fLocalMapperAllocLength = 0;
 	}
+	if (copyMD) copyMD->release();
+	state->fPrepared = false;
     }
 
     return ret;
 }
 
+IOReturn
+IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
+{
+    IODMACommandInternal * state = fInternalState;
+    if (fActive < 1)
+	return (kIOReturnNotReady);
+
+    if (offset)
+	*offset = state->fPreparedOffset;
+    if (length)
+	*length = state->fPreparedLength;
+
+    return (kIOReturnSuccess);
+}
+
 IOReturn
 IODMACommand::synchronize(IOOptionBits options)
 {
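// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream diff): the SegmentOptions flavor
// of prepareWithSpecification() added above folds the specification and the
// prepare into one call. The constraint values are illustrative; 'cmd' and
// 'md' are as in the earlier sketches.
IODMACommand::SegmentOptions opts =
{
    .fStructSize                = sizeof(opts),
    .fNumAddressBits            = 32,		// e.g. a 32-bit DMA engine
    .fMaxSegmentSize            = 65536,
    .fMaxTransferSize           = 0,		// unlimited
    .fAlignment                 = 4,		// segment addresses 4-byte aligned
    .fAlignmentLength           = 4,		// segment lengths 4-byte aligned
    .fAlignmentInternalSegments = 4
};

IOReturn err = cmd->setMemoryDescriptor(md, false);
if (kIOReturnSuccess == err)
{
    err = cmd->prepareWithSpecification(kIODMACommandOutputHost32, &opts,
					IODMACommand::kMapped,
					NULL /* system mapper */,
					0 /* offset */, 0 /* whole descriptor */,
					true /* flushCache */, true /* synchronize */);
}
// ---------------------------------------------------------------------------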
@@ -710,14 +984,13 @@ IODMACommand::synchronize(IOOptionBits options)
	op = 0;
	if (kForceDoubleBuffer & options)
	{
-	    if (state->fDoubleBuffer)
-		return kIOReturnSuccess;
-	    if (state->fCursor)
-		state->fCursor = false;
-	    else
-		ret = walkAll(kWalkComplete);
+	    if (state->fDoubleBuffer) return kIOReturnSuccess;
+	    ret = complete(false /* invalidateCache */, true /* synchronize */);
+	    state->fCursor = false;
+	    state->fForceDoubleBuffer = true;
+	    ret = prepare(state->fPreparedOffset, state->fPreparedLength, false /* flushCache */, true /* synchronize */);
 
-	    op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
+	    return (ret);
	}
	else if (state->fCursor)
	    return kIOReturnSuccess;
@@ -752,7 +1025,7 @@ IODMACommand::transferSegment(void   *reference,
 			void   *segments,
 			UInt32  segmentIndex)
 {
-    IODMACommandTransferContext * context = (IODMACommandTransferContext *) segments;
+    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
     UInt64   length  = min(segment.fLength, context->remaining);
     addr64_t ioAddr  = segment.fIOVMAddr;
     addr64_t cpuAddr = ioAddr;
@@ -765,7 +1038,7 @@ IODMACommand::transferSegment(void   *reference,
	if ((kMapped == MAPTYPE(target->fMappingOptions))
	    && target->fMapper)
	{
-	    cpuAddr = target->fMapper->mapAddr(ioAddr);
+	    cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr);
	    copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
	    ioAddr += copyLen;
	}
@@ -793,6 +1066,7 @@ IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UI
 {
     IODMACommandInternal * state = fInternalState;
     IODMACommandTransferContext context;
+    Segment64     segments[1];
     UInt32        numSegments = 0-1;
 
     if (fActive < 1)
@@ -806,7 +1080,7 @@ IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UI
     context.bufferOffset = 0;
     context.remaining    = length;
     context.op           = transferOp;
-    (void) genIOVMSegments(transferSegment, (void *) kWalkClient, &offset, &context, &numSegments);
+    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);
 
     return (length - context.remaining);
 }
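// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream diff): transfer() above backs
// the public readBytes()/writeBytes() accessors, which copy between a CPU
// buffer and the (possibly bounce-buffered) prepared memory. Offsets are
// relative to the prepared range; 'cmd' is a prepared IODMACommand as in the
// earlier sketches.
char   header[16];
UInt64 done = cmd->readBytes(0, header, sizeof(header));	// device memory -> CPU
if (done == sizeof(header))
{
    // ... inspect the bytes the device just wrote ...
}
// ---------------------------------------------------------------------------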
@@ -828,17 +1102,18 @@ IODMACommand::genIOVMSegments(UInt64 *offsetP,
 			      void   *segmentsP,
 			      UInt32 *numSegmentsP)
 {
-    return (genIOVMSegments(clientOutputSegment, (void *) kWalkClient, offsetP, segmentsP, numSegmentsP));
+    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
+			    offsetP, segmentsP, numSegmentsP));
 }
 
 IOReturn
-IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
+IODMACommand::genIOVMSegments(uint32_t op,
+			      InternalSegmentFunction outSegFunc,
 			      void   *reference,
 			      UInt64 *offsetP,
 			      void   *segmentsP,
 			      UInt32 *numSegmentsP)
 {
-    IOOptionBits op = (IOOptionBits) reference;
     IODMACommandInternal * internalState = fInternalState;
     IOOptionBits mdOp = kIOMDWalkSegments;
     IOReturn ret = kIOReturnSuccess;
@@ -850,7 +1125,7 @@ IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
	return kIOReturnBadArgument;
 
     IOMDDMAWalkSegmentArgs *state =
-	(IOMDDMAWalkSegmentArgs *) fState;
+	(IOMDDMAWalkSegmentArgs *)(void *) fState;
 
     UInt64 offset    = *offsetP + internalState->fPreparedOffset;
     UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;
@@ -859,18 +1134,18 @@ IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
	return kIOReturnOverrun;
 
     if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
-	state->fOffset = 0;
-	state->fIOVMAddr = 0;
-	internalState->fNextRemapIndex = 0;
-	internalState->fNewMD = false;
-	state->fMapped = (IS_MAPPED(fMappingOptions) && fMapper);
-	mdOp = kIOMDFirstSegment;
+	state->fOffset                 = 0;
+	internalState->fIOVMAddrValid  = state->fIOVMAddr = 0;
+	internalState->fNextRemapPage  = NULL;
+	internalState->fNewMD          = false;
+	state->fMapped                 = (0 != fMapper);
+	mdOp                           = kIOMDFirstSegment;
     };
 
-    UInt64    bypassMask = fBypassMask;
     UInt32    segIndex = 0;
     UInt32    numSegments = *numSegmentsP;
     Segment64 curSeg = { 0, 0 };
+    bool      curSegValid = false;
     addr64_t  maxPhys;
 
     if (fNumAddressBits && (fNumAddressBits < 64))
@@ -879,20 +1154,37 @@ IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
	maxPhys = 0;
     maxPhys--;
 
-    while ((state->fIOVMAddr) || state->fOffset < memLength)
+    while (internalState->fIOVMAddrValid || (state->fOffset < memLength))
     {
-	if (!state->fIOVMAddr) {
+	// state = next seg
+	if (!internalState->fIOVMAddrValid)
+	{
	    IOReturn rtn;
 
	    state->fOffset = offset;
	    state->fLength = memLength - offset;
 
-	    if (internalState->fCopyContig && (kWalkClient & op))
+	    if (internalState->fMapContig && internalState->fLocalMapperAllocValid)
	    {
-		state->fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc)
-				    + offset - internalState->fPreparedOffset;
+		state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset;
		rtn = kIOReturnSuccess;
+#if 0
+		{
+		    uint64_t checkOffset;
+		    IOPhysicalLength segLen;
+		    for (checkOffset = 0; checkOffset < state->fLength; )
+		    {
+			addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone);
+			if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys)
+			{
+			    panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset,
+				  state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength,
+				  phys, checkOffset);
+			}
+			checkOffset += page_size - (phys & page_mask);
+		    }
+		}
+#endif
	    }
	    else
	    {
@@ -902,123 +1194,185 @@ IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
		mdOp = kIOMDWalkSegments;
	    }
 
-	    if (rtn == kIOReturnSuccess) {
-		assert(state->fIOVMAddr);
+	    if (rtn == kIOReturnSuccess)
+	    {
+		internalState->fIOVMAddrValid = true;
		assert(state->fLength);
+		if (curSegValid && ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr)) {
+		    UInt64 length = state->fLength;
+		    offset	    += length;
+		    curSeg.fLength  += length;
+		    internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
+		}
	    }
	    else if (rtn == kIOReturnOverrun)
-		state->fIOVMAddr = state->fLength = 0;	// At end
+		internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0;	// At end
	    else
		return rtn;
-	};
-
-	if (!curSeg.fIOVMAddr) {
-	    UInt64 length = state->fLength;
-
-	    offset          += length;
-	    curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
-	    curSeg.fLength   = length;
-	    state->fIOVMAddr = 0;
-	}
-	else if ((curSeg.fIOVMAddr + curSeg.fLength == state->fIOVMAddr)) {
-	    UInt64 length = state->fLength;
-	    offset          += length;
-	    curSeg.fLength  += length;
-	    state->fIOVMAddr = 0;
-	};
+	}
 
+	// seg = state, offset = end of seg
+	if (!curSegValid)
+	{
+	    UInt64 length        = state->fLength;
+	    offset              += length;
+	    curSeg.fIOVMAddr     = state->fIOVMAddr;
+	    curSeg.fLength       = length;
+	    curSegValid          = true;
+	    internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
+	}
 
-	if (!state->fIOVMAddr)
+	if (!internalState->fIOVMAddrValid)
	{
-	    if (kWalkClient & op)
+	    // maxPhys
+	    if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
	    {
-		if ((curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
+		if (internalState->fCursor)
		{
-		    if (internalState->fCursor)
+		    curSegValid = curSeg.fIOVMAddr = 0;
+		    ret = kIOReturnMessageTooLarge;
+		    break;
+		}
+		else if (curSeg.fIOVMAddr <= maxPhys)
+		{
+		    UInt64 remain, newLength;
+
+		    newLength	     = (maxPhys + 1 - curSeg.fIOVMAddr);
+		    DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
+		    remain	     = curSeg.fLength - newLength;
+		    state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
+		    internalState->fIOVMAddrValid = true;
+		    curSeg.fLength   = newLength;
+		    state->fLength   = remain;
+		    offset	    -= remain;
+		}
+		else
+		{
+		    UInt64    addr = curSeg.fIOVMAddr;
+		    ppnum_t   addrPage = atop_64(addr);
+		    vm_page_t remap = NULL;
+		    UInt64    remain, newLength;
+
+		    DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);
+
+		    remap = internalState->fNextRemapPage;
+		    if (remap && (addrPage == vm_page_get_offset(remap)))
		    {
-			curSeg.fIOVMAddr = 0;
-			ret = kIOReturnMessageTooLarge;
-			break;
		    }
-		    else if (curSeg.fIOVMAddr <= maxPhys)
+		    else for (remap = internalState->fCopyPageAlloc;
+			      remap && (addrPage != vm_page_get_offset(remap));
+			      remap = vm_page_get_next(remap))
		    {
-			UInt64 remain, newLength;
-
-			newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
-			DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
-			remain           = curSeg.fLength - newLength;
-			state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
-			curSeg.fLength   = newLength;
-			state->fLength   = remain;
-			offset          -= remain;
-		    }
-		    else if (gIOCopyMapper)
-		    {
-			DEBG("sparse switch %qx, %qx ", curSeg.fIOVMAddr, curSeg.fLength);
-			if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
-					    ptoa_64(internalState->fCopyPageAlloc + internalState->fNextRemapIndex)))
-			{
-			    curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + internalState->fNextRemapIndex)
-						+ (curSeg.fIOVMAddr & PAGE_MASK);
-			    internalState->fNextRemapIndex += atop_64(round_page(curSeg.fLength));
-			}
-			else for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
-			{
-			    if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
-						ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)))
-			    {
-				curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)
-						    + (curSeg.fIOVMAddr & PAGE_MASK);
-				internalState->fNextRemapIndex = checkRemapIndex + atop_64(round_page(curSeg.fLength));
-				break;
-			    }
-			}
-			DEBG("-> %qx, %qx\n", curSeg.fIOVMAddr, curSeg.fLength);
		    }
+
+		    if (!remap) panic("no remap page found");
+
+		    curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
+					+ (addr & PAGE_MASK);
+		    curSegValid = true;
+		    internalState->fNextRemapPage = vm_page_get_next(remap);
+
+		    newLength = PAGE_SIZE - (addr & PAGE_MASK);
+		    if (newLength < curSeg.fLength)
+		    {
+			remain           = curSeg.fLength - newLength;
+			state->fIOVMAddr = addr + newLength;
+			internalState->fIOVMAddrValid = true;
+			curSeg.fLength   = newLength;
+			state->fLength   = remain;
+			offset          -= remain;
+		    }
+		    DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
		}
	    }
 
+	    // reduce size of output segment
+	    uint64_t reduce, leftover = 0;
+
+	    // fMaxSegmentSize
	    if (curSeg.fLength > fMaxSegmentSize)
	    {
-		UInt64 remain = curSeg.fLength - fMaxSegmentSize;
+		leftover        += curSeg.fLength - fMaxSegmentSize;
+		curSeg.fLength   = fMaxSegmentSize;
+		state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
+		internalState->fIOVMAddrValid = true;
+	    }
 
-		state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
-		curSeg.fLength   = fMaxSegmentSize;
+	    // alignment current length
+	    reduce = (curSeg.fLength & fAlignMaskLength);
+	    if (reduce && (curSeg.fLength > reduce))
+	    {
+		leftover        += reduce;
+		curSeg.fLength  -= reduce;
+		state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
+		internalState->fIOVMAddrValid = true;
+	    }
 
-		state->fLength   = remain;
-		offset          -= remain;
+	    // alignment next address
+	    reduce = (state->fIOVMAddr & fAlignMaskInternalSegments);
+	    if (reduce && (curSeg.fLength > reduce))
+	    {
+		leftover        += reduce;
+		curSeg.fLength  -= reduce;
+		state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
+		internalState->fIOVMAddrValid = true;
	    }
 
-	    if (internalState->fCursor
-		&& (0 != (fAlignMask & curSeg.fIOVMAddr)))
+	    if (leftover)
	    {
-		curSeg.fIOVMAddr = 0;
-		ret = kIOReturnNotAligned;
-		break;
+		DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n",
+		     leftover, offset,
+		     curSeg.fIOVMAddr, curSeg.fLength);
+		state->fLength = leftover;
+		offset -= leftover;
+	    }
+
+	    //
+
+	    if (internalState->fCursor)
+	    {
+		bool misaligned;
+		uint32_t mask;
+
+		mask = (segIndex ? fAlignMaskInternalSegments : internalState->fSourceAlignMask);
+		misaligned = (0 != (mask & curSeg.fIOVMAddr));
+		if (!misaligned)
+		{
+		    mask = fAlignMaskLength;
+		    misaligned |= (0 != (mask & curSeg.fLength));
+		}
+		if (misaligned)
+		{
+		    if (misaligned) DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength);
+		    curSegValid = curSeg.fIOVMAddr = 0;
+		    ret = kIOReturnNotAligned;
+		    break;
+		}
	    }
 
	    if (offset >= memLength)
	    {
		curSeg.fLength   -= (offset - memLength);
		offset = memLength;
-		state->fIOVMAddr = state->fLength = 0;	// At end
+		internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0;	// At end
		break;
	    }
	}
 
-        if (state->fIOVMAddr) {
+        if (internalState->fIOVMAddrValid) {
            if ((segIndex + 1 == numSegments))
                break;
 
	    ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
-            curSeg.fIOVMAddr = 0;
+            curSegValid = curSeg.fIOVMAddr = 0;
	    if (kIOReturnSuccess != ret)
		break;
        }
    }
 
-    if (curSeg.fIOVMAddr) {
+    if (curSegValid) {
	ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }
 
@@ -1036,16 +1390,18 @@ IODMACommand::clientOutputSegment(
	void *reference, IODMACommand *target,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
 {
+    SegmentFunction segmentFunction = (SegmentFunction) reference;
     IOReturn ret = kIOReturnSuccess;
 
-    if ((target->fNumAddressBits < 64)
-	&& ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits))
+    if (target->fNumAddressBits && (target->fNumAddressBits < 64)
+	&& ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
+	&& (target->reserved->fLocalMapperAllocValid || !target->fMapper))
     {
	DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
	ret = kIOReturnMessageTooLarge;
    }
 
-    if (!(*target->fOutSeg)(target, segment, vSegList, outSegIndex))
+    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
    {
	DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
	ret = kIOReturnMessageTooLarge;
@@ -1054,6 +1410,16 @@ IODMACommand::clientOutputSegment(
     return (ret);
 }
 
+IOReturn
+IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
+			      UInt64   *offsetP,
+			      void     *segmentsP,
+			      UInt32   *numSegmentsP)
+{
+    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
+			    offsetP, segmentsP, numSegmentsP));
+}
+
 bool
 IODMACommand::OutputHost32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
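// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream diff): consuming the segment
// generator shown above from a driver. ProgramScatterGather() and
// transferLength are hypothetical stand-ins for writing one descriptor to the
// device's SG table and for the prepared transfer size; gen64IOVMSegments()
// is the public Segment64 wrapper declared in IODMACommand.h.
UInt64 offset = 0;
while (offset < transferLength)
{
    IODMACommand::Segment64 segs[32];
    UInt32 numSegs = 32;

    if (kIOReturnSuccess != cmd->gen64IOVMSegments(&offset, segs, &numSegs))
	break;
    for (UInt32 i = 0; i < numSegs; i++)
	ProgramScatterGather(segs[i].fIOVMAddr, segs[i].fLength);
}
// ---------------------------------------------------------------------------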