diff --git a/iokit/Kernel/IODMACommand.cpp b/iokit/Kernel/IODMACommand.cpp
index 0c2956ce7da2716713e04a0ff31c3c922060fca7..5feadeb14d3d580aa801b7f59b2af58b61c05c50 100644
--- a/iokit/Kernel/IODMACommand.cpp
+++ b/iokit/Kernel/IODMACommand.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -30,6 +30,7 @@
 
 #include <libkern/OSTypes.h>
 #include <libkern/OSByteOrder.h>
+#include <libkern/OSDebug.h>
 
 #include <IOKit/IOReturn.h>
 #include <IOKit/IOLib.h>
 #include <IOKit/IOBufferMemoryDescriptor.h>
 
 #include "IOKitKernelInternal.h"
-#include "IOCopyMapper.h"
 
 #define MAPTYPE(type)          ((UInt) (type) & kTypeMask)
-#define IS_MAPPED(type)                (MAPTYPE(type) == kMapped)
-#define IS_BYPASSED(type)      (MAPTYPE(type) == kBypassed)
 #define IS_NONCOHERENT(type)   (MAPTYPE(type) == kNonCoherent)
 
-
-static bool gIOEnableCopyMapper  = true;
-
 enum 
 {
     kWalkSyncIn       = 0x01,  // bounce -> md 
@@ -61,30 +56,6 @@ enum
     kWalkClient       = 0x80
 };
 
-struct ExpansionData
-{
-    IOMDDMAWalkSegmentState fState;
-    IOMDDMACharacteristics  fMDSummary;
-
-    UInt64 fPreparedOffset;
-    UInt64 fPreparedLength;
-
-    UInt8  fCursor;
-    UInt8  fCheckAddressing;
-    UInt8  fIterateOnly;
-    UInt8  fMisaligned;
-    UInt8  fCopyContig;
-    UInt8  fPrepared;
-    UInt8  fDoubleBuffer;
-    UInt8  __pad[1];
-
-    ppnum_t  fCopyPageAlloc;
-    ppnum_t  fCopyPageCount;
-    addr64_t fCopyNext;
-
-    class IOBufferMemoryDescriptor * fCopyMD;
-};
-typedef ExpansionData IODMACommandInternal;
 
 #define fInternalState reserved
 #define fState         reserved->fState
@@ -104,25 +75,24 @@ typedef ExpansionData IODMACommandInternal;
 #endif
 
 #if 0
-#define DEBG(fmt, args...)     { kprintf(fmt, ## args); }
+#define DEBG(fmt, args...)     { IOLog(fmt, ## args); kprintf(fmt, ## args); }
 #else
 #define DEBG(fmt, args...)     {}
 #endif
 
-
 /**************************** class IODMACommand ***************************/
 
 #undef super
-#define super OSObject
+#define super IOCommand
 OSDefineMetaClassAndStructors(IODMACommand, IOCommand);
 
-OSMetaClassDefineReservedUnused(IODMACommand,  0);
-OSMetaClassDefineReservedUnused(IODMACommand,  1);
-OSMetaClassDefineReservedUnused(IODMACommand,  2);
-OSMetaClassDefineReservedUnused(IODMACommand,  3);
-OSMetaClassDefineReservedUnused(IODMACommand,  4);
-OSMetaClassDefineReservedUnused(IODMACommand,  5);
-OSMetaClassDefineReservedUnused(IODMACommand,  6);
+OSMetaClassDefineReservedUsed(IODMACommand,  0);
+OSMetaClassDefineReservedUsed(IODMACommand,  1);
+OSMetaClassDefineReservedUsed(IODMACommand,  2);
+OSMetaClassDefineReservedUsed(IODMACommand,  3);
+OSMetaClassDefineReservedUsed(IODMACommand,  4);
+OSMetaClassDefineReservedUsed(IODMACommand,  5);
+OSMetaClassDefineReservedUsed(IODMACommand,  6);
 OSMetaClassDefineReservedUnused(IODMACommand,  7);
 OSMetaClassDefineReservedUnused(IODMACommand,  8);
 OSMetaClassDefineReservedUnused(IODMACommand,  9);
@@ -133,6 +103,39 @@ OSMetaClassDefineReservedUnused(IODMACommand, 13);
 OSMetaClassDefineReservedUnused(IODMACommand, 14);
 OSMetaClassDefineReservedUnused(IODMACommand, 15);
 
+IODMACommand *
+IODMACommand::withRefCon(void * refCon)
+{
+    IODMACommand * me = new IODMACommand;
+
+    if (me && !me->initWithRefCon(refCon))
+    {
+        me->release();
+        return 0;
+    }
+
+    return me;
+}
+
+IODMACommand *
+IODMACommand::withSpecification(SegmentFunction  outSegFunc,
+                         const SegmentOptions * segmentOptions,
+                         uint32_t               mappingOptions,
+                         IOMapper             * mapper,
+                         void                 * refCon)
+{
+    IODMACommand * me = new IODMACommand;
+
+    if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions, 
+                                        mapper, refCon))
+    {
+        me->release();
+        return 0;
+    }
+
+    return me;
+}
+
 IODMACommand *
 IODMACommand::withSpecification(SegmentFunction outSegFunc,
                                UInt8           numAddressBits,
@@ -152,7 +155,7 @@ IODMACommand::withSpecification(SegmentFunction outSegFunc,
     {
         me->release();
         return 0;
-    };
+    }
 
     return me;
 }
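As a usage sketch (not part of this change): a driver for a hypothetical 32-bit DMA engine could fill in a SegmentOptions and call the new factory added above. The numeric values, the NULL mapper (which selects the system mapper), and the nested IODMACommand::SegmentOptions spelling are illustrative assumptions.

    IODMACommand::SegmentOptions options =
    {
        .fStructSize                = sizeof(options),
        .fNumAddressBits            = 32,          // illustrative: 32-bit addressing engine
        .fMaxSegmentSize            = 65536,
        .fMaxTransferSize           = 1024 * 1024,
        .fAlignment                 = 4,
        .fAlignmentLength           = 1,
        .fAlignmentInternalSegments = 4
    };

    IODMACommand * cmd = IODMACommand::withSpecification(
        IODMACommand::OutputHost32,     // host-order 32-bit output segments
        &options,
        IODMACommand::kMapped,          // mappingOptions
        NULL,                           // mapper: NULL selects IOMapper::gSystem
        NULL);                          // refCon
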
@@ -160,12 +163,54 @@ IODMACommand::withSpecification(SegmentFunction outSegFunc,
 IODMACommand *
 IODMACommand::cloneCommand(void *refCon)
 {
-    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
-           fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
+    SegmentOptions segmentOptions =
+    {
+       .fStructSize                = sizeof(segmentOptions),
+       .fNumAddressBits            = (uint8_t)fNumAddressBits,
+       .fMaxSegmentSize            = fMaxSegmentSize,
+       .fMaxTransferSize           = fMaxTransferSize,
+       .fAlignment                 = fAlignMask + 1,
+       .fAlignmentLength           = fAlignMaskInternalSegments + 1,
+       .fAlignmentInternalSegments = fAlignMaskLength + 1
+    };
+
+    return (IODMACommand::withSpecification(fOutSeg, &segmentOptions,
+                                           fMappingOptions, fMapper, refCon));
 }
 
 #define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
 
+bool
+IODMACommand::initWithRefCon(void * refCon)
+{
+    if (!super::init()) return (false);
+
+    if (!reserved)
+    {
+       reserved = IONew(IODMACommandInternal, 1);
+       if (!reserved) return false;
+    }
+    bzero(reserved, sizeof(IODMACommandInternal));
+    fRefCon = refCon;
+
+    return (true);
+}
+
+bool
+IODMACommand::initWithSpecification(SegmentFunction       outSegFunc,
+                                   const SegmentOptions * segmentOptions,
+                                   uint32_t               mappingOptions,
+                                   IOMapper             * mapper,
+                                   void                 * refCon)
+{
+    if (!initWithRefCon(refCon)) return false;
+
+    if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions, 
+                                            mappingOptions, mapper))      return false;
+
+    return (true);
+}
+
 bool
 IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                    UInt8           numAddressBits,
@@ -176,73 +221,118 @@ IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                    IOMapper       *mapper,
                                    void           *refCon)
 {
-    if (!super::init() || !outSegFunc || !numAddressBits)
-        return false;
+    SegmentOptions segmentOptions =
+    {
+       .fStructSize                = sizeof(segmentOptions),
+       .fNumAddressBits            = numAddressBits,
+       .fMaxSegmentSize            = maxSegmentSize,
+       .fMaxTransferSize           = maxTransferSize,
+       .fAlignment                 = alignment,
+       .fAlignmentLength           = 1,
+       .fAlignmentInternalSegments = alignment
+    };
+
+    return (initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon));
+}
+
+IOReturn
+IODMACommand::setSpecification(SegmentFunction        outSegFunc,
+                              const SegmentOptions * segmentOptions,
+                              uint32_t               mappingOptions,
+                              IOMapper             * mapper)
+{
+    IOService * device = 0;
+    UInt8       numAddressBits;
+    UInt64      maxSegmentSize;
+    UInt64      maxTransferSize;
+    UInt32      alignment;
+
+    bool        is32Bit;
+
+    if (!outSegFunc || !segmentOptions) return (kIOReturnBadArgument);
+
+    is32Bit = ((OutputHost32 == outSegFunc) 
+               || (OutputBig32 == outSegFunc)
+                || (OutputLittle32 == outSegFunc));
 
-    bool is32Bit = (OutputHost32   == outSegFunc || OutputBig32 == outSegFunc
-                 || OutputLittle32 == outSegFunc);
+    numAddressBits = segmentOptions->fNumAddressBits;
+    maxSegmentSize = segmentOptions->fMaxSegmentSize;
+    maxTransferSize = segmentOptions->fMaxTransferSize;
+    alignment = segmentOptions->fAlignment;
     if (is32Bit)
     {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
-           return false;               // Wrong output function for bits
+           return (kIOReturnBadArgument);              // Wrong output function for bits
     }
 
-    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
-       return false;
+    if (numAddressBits && (numAddressBits < PAGE_SHIFT)) return (kIOReturnBadArgument);
 
-    if (!maxSegmentSize)
-       maxSegmentSize--;       // Set Max segment to -1
-    if (!maxTransferSize)
-       maxTransferSize--;      // Set Max transfer to -1
+    if (!maxSegmentSize)  maxSegmentSize--;    // Set Max segment to -1
+    if (!maxTransferSize) maxTransferSize--;   // Set Max transfer to -1
 
-    if (!mapper)
+    if (mapper && !OSDynamicCast(IOMapper, mapper))
+    {
+       device = mapper;
+       mapper = 0;
+    }
+    if (!mapper && (kUnmapped != MAPTYPE(mappingOptions)))
     {
         IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
     }
 
     fNumSegments     = 0;
-    fBypassMask      = 0;
     fOutSeg         = outSegFunc;
     fNumAddressBits  = numAddressBits;
     fMaxSegmentSize  = maxSegmentSize;
     fMappingOptions  = mappingOptions;
     fMaxTransferSize = maxTransferSize;
-    if (!alignment)
-       alignment = 1;
+    if (!alignment)    alignment = 1;
     fAlignMask      = alignment - 1;
-    fMapper          = mapper;
-    fRefCon          = refCon;
+
+    alignment = segmentOptions->fAlignmentLength;
+    if (!alignment) alignment = 1;
+    fAlignMaskLength = alignment - 1;
+
+    alignment = segmentOptions->fAlignmentInternalSegments;
+    if (!alignment) alignment = (fAlignMask + 1);
+    fAlignMaskInternalSegments = alignment - 1;
 
     switch (MAPTYPE(mappingOptions))
     {
-    case kMapped:                   break;
-    case kNonCoherent: fMapper = 0; break;
+    case kMapped:              break;
+    case kUnmapped:     break;
+    case kNonCoherent:         break;
+
     case kBypassed:
-       if (mapper && !mapper->getBypassMask(&fBypassMask))
-           return false;
-       break;
+       if (!mapper)    break;
+       return (kIOReturnBadArgument);
+
     default:
-       return false;
+       return (kIOReturnBadArgument);
     };
 
-    reserved = IONew(ExpansionData, 1);
-    if (!reserved)
-       return false;
-    bzero(reserved, sizeof(ExpansionData));
+    if (mapper != fMapper)
+    {
+       if (mapper)  mapper->retain();
+       if (fMapper) fMapper->release();
+       fMapper = mapper;
+    }
 
     fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
-    
-    return true;
+    fInternalState->fDevice = device;
+
+    return (kIOReturnSuccess);
 }
 
 void
 IODMACommand::free()
 {
-    if (reserved)
-       IODelete(reserved, ExpansionData, 1);
+    if (reserved) IODelete(reserved, IODMACommandInternal, 1);
+
+    if (fMapper) fMapper->release();
 
     super::free();
 }
@@ -250,6 +340,8 @@ IODMACommand::free()
 IOReturn
 IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
 {
+    IOReturn err = kIOReturnSuccess;
+       
     if (mem == fMemory)
     {
        if (!autoPrepare)
@@ -267,45 +359,47 @@ IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepar
        if (fActive)
            return kIOReturnBusy;
        clearMemoryDescriptor();
-    };
+    }
 
     if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
-       IOReturn rtn = mem->dmaCommandOperation(
-               kIOMDGetCharacteristics,
-               &fMDSummary, sizeof(fMDSummary));
-       if (rtn)
-           return rtn;
+       err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
+                                      &fMDSummary, sizeof(fMDSummary));
+       if (err)
+           return err;
 
        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;
 
        if ((kMapped == MAPTYPE(fMappingOptions))
-           && fMapper 
-           && (!fNumAddressBits || (fNumAddressBits >= 31)))
-           // assuming mapped space is 2G
+           && fMapper)
            fInternalState->fCheckAddressing = false;
        else
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));
 
+       fInternalState->fNewMD = true;
        mem->retain();
        fMemory = mem;
-
-       if (autoPrepare)
-           return prepare();
-    };
-
-    return kIOReturnSuccess;
+       if (!fMapper) mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
+       if (autoPrepare) {
+           err = prepare();
+           if (err) {
+               clearMemoryDescriptor();
+           }
+       }
+    }
+       
+    return err;
 }
 
 IOReturn
 IODMACommand::clearMemoryDescriptor(bool autoComplete)
 {
-    if (fActive && !autoComplete)
-       return (kIOReturnNotReady);
+    if (fActive && !autoComplete) return (kIOReturnNotReady);
 
-    if (fMemory) {
-       while (fActive)
-           complete();
+    if (fMemory)
+    {
+       while (fActive) complete();
+       if (!fMapper) fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
        fMemory->release();
        fMemory = 0;
     }
@@ -319,6 +413,16 @@ IODMACommand::getMemoryDescriptor() const
     return fMemory;
 }
 
+IOMemoryDescriptor *
+IODMACommand::getIOMemoryDescriptor() const
+{
+    IOMemoryDescriptor * mem;
+
+    mem = reserved->fCopyMD;
+    if (!mem) mem = __IODEQUALIFY(IOMemoryDescriptor *, fMemory);
+
+    return (mem);
+}
 
 IOReturn
 IODMACommand::segmentOp(
@@ -328,15 +432,15 @@ IODMACommand::segmentOp(
                        void         *segments,
                        UInt32        segmentIndex)
 {
-    IOOptionBits op = (IOOptionBits) reference;
+    IOOptionBits op = (uintptr_t) reference;
     addr64_t     maxPhys, address;
-    addr64_t     remapAddr = 0;
     uint64_t     length;
     uint32_t     numPages;
+    uint32_t     mask;
 
     IODMACommandInternal * state = target->reserved;
 
-    if (target->fNumAddressBits && (target->fNumAddressBits < 64))
+    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAllocValid || !target->fMapper))
        maxPhys = (1ULL << target->fNumAddressBits);
     else
        maxPhys = 0;
@@ -345,13 +449,19 @@ IODMACommand::segmentOp(
     address = segment.fIOVMAddr;
     length = segment.fLength;
 
-    assert(address);
     assert(length);
 
     if (!state->fMisaligned)
     {
-       state->fMisaligned |= (0 != (target->fAlignMask & address));
-       if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, target->fAlignMask);
+       mask = (segmentIndex ? target->fAlignMaskInternalSegments : state->fSourceAlignMask);
+       state->fMisaligned |= (0 != (mask & address));
+       if (state->fMisaligned) DEBG("misaligned address %qx:%qx, %x\n", address, length, mask);
+    }
+    if (!state->fMisaligned)
+    {
+       mask = target->fAlignMaskLength;
+       state->fMisaligned |= (0 != (mask & length));
+       if (state->fMisaligned) DEBG("misaligned length %qx:%qx, %x\n", address, length, mask);
     }
 
     if (state->fMisaligned && (kWalkPreflight & op))
@@ -375,8 +485,7 @@ IODMACommand::segmentOp(
     if (!length)
        return (kIOReturnSuccess);
 
-    numPages = atop_64(round_page_64(length));
-    remapAddr = state->fCopyNext;
+    numPages = atop_64(round_page_64((address & PAGE_MASK) + length));
 
     if (kWalkPreflight & op)
     {
@@ -384,40 +493,78 @@ IODMACommand::segmentOp(
     }
     else
     {
+       vm_page_t lastPage;
+       lastPage = NULL;
        if (kWalkPrepare & op)
        {
+           lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
-               gIOCopyMapper->iovmInsert(atop_64(remapAddr), idx, atop_64(address) + idx);
-       }
-       if (state->fDoubleBuffer)
-           state->fCopyNext += length;
-       else
-       {
-           state->fCopyNext += round_page(length);
-           remapAddr += (address & PAGE_MASK);
+           {
+               vm_page_set_offset(lastPage, atop_64(address) + idx);
+               lastPage = vm_page_get_next(lastPage);
+           }
        }
 
-       if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
+       if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
        {
-           DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr, 
-                       (kWalkSyncIn & op) ? "->" : "<-", 
-                       address, length, op);
-           if (kWalkSyncIn & op)
-           { // cppvNoModSnk
-               copypv(remapAddr, address, length,
-                               cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
-           }
-           else
+           lastPage = state->fCopyNext;
+           for (IOItemCount idx = 0; idx < numPages; idx++)
            {
-               copypv(address, remapAddr, length,
-                               cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
+               if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
+               {
+                   addr64_t cpuAddr = address;
+                   addr64_t remapAddr;
+                   uint64_t chunk;
+
+                   if ((kMapped == MAPTYPE(target->fMappingOptions))
+                       && target->fMapper)
+                   {
+                       cpuAddr = target->fMapper->mapToPhysicalAddress(address);
+                   }
+       
+                   remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
+                   if (!state->fDoubleBuffer)
+                   {
+                       remapAddr += (address & PAGE_MASK);
+                   }
+                   chunk = PAGE_SIZE - (address & PAGE_MASK);
+                   if (chunk > length)
+                       chunk = length;
+
+                   DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr, 
+                               (kWalkSyncIn & op) ? "->" : "<-", 
+                               address, chunk, op);
+
+                   if (kWalkSyncIn & op)
+                   { // cppvNoModSnk
+                       copypv(remapAddr, cpuAddr, chunk,
+                                       cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
+                   }
+                   else
+                   {
+                       copypv(cpuAddr, remapAddr, chunk,
+                                       cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
+                   }
+                   address += chunk;
+                   length -= chunk;
+               }
+               lastPage = vm_page_get_next(lastPage);
            }
        }
+       state->fCopyNext = lastPage;
     }
 
     return kIOReturnSuccess;
 }
 
+IOBufferMemoryDescriptor * 
+IODMACommand::createCopyBuffer(IODirection direction, UInt64 length)
+{
+    mach_vm_address_t mask = 0xFFFFF000;       //state->fSourceAlignMask
+    return (IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, 
+                                                       direction, length, mask));
+}
+
 IOReturn
 IODMACommand::walkAll(UInt8 op)
 {
@@ -427,61 +574,66 @@ IODMACommand::walkAll(UInt8 op)
     UInt32       numSegments;
     UInt64       offset;
 
-    if (gIOEnableCopyMapper && (kWalkPreflight & op))
+    if (kWalkPreflight & op)
     {
-       state->fCopyContig     = false;
        state->fMisaligned     = false;
        state->fDoubleBuffer   = false;
        state->fPrepared       = false;
-       state->fCopyNext       = 0;
+       state->fCopyNext       = NULL;
        state->fCopyPageAlloc  = 0;
        state->fCopyPageCount  = 0;
-       state->fCopyMD         = 0;
+       state->fNextRemapPage  = NULL;
+       state->fCopyMD         = 0;
 
        if (!(kWalkDoubleBuffer & op))
        {
            offset = 0;
            numSegments = 0-1;
-           ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
+           ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
        }
 
        op &= ~kWalkPreflight;
 
-       state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
+       state->fDoubleBuffer = (state->fMisaligned || state->fForceDoubleBuffer);
+       state->fForceDoubleBuffer = false;
        if (state->fDoubleBuffer)
            state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));
 
        if (state->fCopyPageCount)
        {
-           IOMapper * mapper;
-           ppnum_t    mapBase = 0;
+           vm_page_t mapBase = NULL;
 
            DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);
 
-           mapper = gIOCopyMapper;
-           if (mapper)
-               mapBase = mapper->iovmAlloc(state->fCopyPageCount);
-           if (mapBase)
+           if (!fMapper && !state->fDoubleBuffer)
            {
-               state->fCopyPageAlloc = mapBase;
-               if (state->fCopyPageAlloc && state->fDoubleBuffer)
+               kern_return_t kr;
+
+               if (fMapper) panic("fMapper copying");
+
+               kr = vm_page_alloc_list(state->fCopyPageCount, 
+                                       KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
+               if (KERN_SUCCESS != kr)
                {
-                   DEBG("contig copy map\n");
-                   state->fCopyContig = true;
+                   DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
+                   mapBase = NULL;
                }
+           }
 
-               state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
+           if (mapBase)
+           {
+               state->fCopyPageAlloc = mapBase;
+               state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
-               ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
+               ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
                state->fPrepared = true;
                op &= ~(kWalkSyncIn | kWalkSyncOut);
            }
            else
            {
                DEBG("alloc IOBMD\n");
-               state->fCopyMD = IOBufferMemoryDescriptor::withOptions(
-                                   fMDSummary.fDirection, state->fPreparedLength, page_size);
+               state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, state->fPreparedLength);
 
                if (state->fCopyMD)
                {
@@ -490,14 +642,14 @@ IODMACommand::walkAll(UInt8 op)
                }
                else
                {
-                   DEBG("IODMACommand !iovmAlloc");
+                   DEBG("IODMACommand !alloc IOBMD");
                    return (kIOReturnNoResources);
                }
            }
        }
     }
 
-    if (gIOEnableCopyMapper && state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
+    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
     {
        if (state->fCopyPageCount)
        {
@@ -505,10 +657,10 @@ IODMACommand::walkAll(UInt8 op)
 
            if (state->fCopyPageAlloc)
            {
-               state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
+               state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
-               ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
+               ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
            }
            else if (state->fCopyMD)
            {
@@ -541,7 +693,7 @@ IODMACommand::walkAll(UInt8 op)
     {
        if (state->fCopyPageAlloc)
        {
-           gIOCopyMapper->iovmFree(state->fCopyPageAlloc, state->fCopyPageCount);
+           vm_page_free_list(state->fCopyPageAlloc, FALSE);
            state->fCopyPageAlloc = 0;
            state->fCopyPageCount = 0;
        }
@@ -556,25 +708,95 @@ IODMACommand::walkAll(UInt8 op)
     return (ret);
 }
 
+UInt8
+IODMACommand::getNumAddressBits(void)
+{
+    return (fNumAddressBits);
+}
+
+UInt32
+IODMACommand::getAlignment(void)
+{
+    return (fAlignMask + 1);
+}
+
+uint32_t
+IODMACommand::getAlignmentLength(void)
+{
+    return (fAlignMaskLength + 1);
+}
+
+uint32_t
+IODMACommand::getAlignmentInternalSegments(void)
+{
+    return (fAlignMaskInternalSegments + 1);
+}
+
+IOReturn
+IODMACommand::prepareWithSpecification(SegmentFunction       outSegFunc,
+                                      const SegmentOptions * segmentOptions,
+                                      uint32_t               mappingOptions,
+                                      IOMapper             * mapper,
+                                      UInt64                 offset,
+                                      UInt64                 length,
+                                      bool                   flushCache,
+                                      bool                   synchronize)
+{
+    IOReturn ret;
+
+    if (fActive) return kIOReturnNotPermitted;
+
+    ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper);
+    if (kIOReturnSuccess != ret) return (ret);
+
+    ret = prepare(offset, length, flushCache, synchronize);
+
+    return (ret);
+}
+
+IOReturn
+IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
+                                      UInt8            numAddressBits,
+                                      UInt64           maxSegmentSize,
+                                      MappingOptions   mappingOptions,
+                                      UInt64           maxTransferSize,
+                                      UInt32           alignment,
+                                      IOMapper         *mapper,
+                                      UInt64           offset,
+                                      UInt64           length,
+                                      bool             flushCache,
+                                      bool             synchronize)
+{
+    SegmentOptions segmentOptions =
+    {
+       .fStructSize                = sizeof(segmentOptions),
+       .fNumAddressBits            = numAddressBits,
+       .fMaxSegmentSize            = maxSegmentSize,
+       .fMaxTransferSize           = maxTransferSize,
+       .fAlignment                 = alignment,
+       .fAlignmentLength           = 1,
+       .fAlignmentInternalSegments = alignment
+    };
+
+    return (prepareWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper,
+                                       offset, length, flushCache, synchronize));
+}
+
+
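A related sketch: the new prepareWithSpecification() collapses setSpecification() and prepare() into one call. It assumes a command created with withRefCon(), a driver-owned IOMemoryDescriptor md already attached, and the illustrative options struct from the earlier sketch.

    IODMACommand * cmd = IODMACommand::withRefCon(NULL);
    cmd->setMemoryDescriptor(md, false /* autoPrepare */);
    IOReturn ret = cmd->prepareWithSpecification(
        IODMACommand::OutputHost32, &options,
        IODMACommand::kMapped, NULL /* system mapper */,
        0 /* offset */, md->getLength(),
        true /* flushCache */, true /* synchronize */);
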
 IOReturn 
 IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
 {
-    IODMACommandInternal * state = fInternalState;
-    IOReturn               ret   = kIOReturnSuccess;
+    IODMACommandInternal *  state = fInternalState;
+    IOReturn                  ret = kIOReturnSuccess;
+    uint32_t       mappingOptions = fMappingOptions;
 
-    if (!length)
-       length = fMDSummary.fLength;
+    // check specification has been set
+    if (!fOutSeg) return (kIOReturnNotReady);
 
-    if (length > fMaxTransferSize)
-       return kIOReturnNoSpace;
+    if (!length) length = fMDSummary.fLength;
 
-#if 0
-    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
-       IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);
+    if (length > fMaxTransferSize) return kIOReturnNoSpace;
 
-       poMD->performOperation(kIOMemoryIncoherentIOStore, 0, fMDSummary.fLength);
-    }
-#endif
     if (fActive++)
     {
        if ((state->fPreparedOffset != offset)
@@ -583,22 +805,35 @@ IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchr
     }
     else
     {
+       if (fAlignMaskLength & length) return (kIOReturnNotAligned);
+
        state->fPreparedOffset = offset;
        state->fPreparedLength = length;
 
-       state->fCopyContig     = false;
+       state->fMapContig      = false;
        state->fMisaligned     = false;
        state->fDoubleBuffer   = false;
        state->fPrepared       = false;
-       state->fCopyNext       = 0;
+       state->fCopyNext       = NULL;
        state->fCopyPageAlloc  = 0;
        state->fCopyPageCount  = 0;
+       state->fNextRemapPage  = NULL;
        state->fCopyMD         = 0;
+       state->fLocalMapperAlloc       = 0;
+       state->fLocalMapperAllocValid  = false;
+       state->fLocalMapperAllocLength = 0;
+
+       state->fLocalMapper    = (fMapper && (fMapper != IOMapper::gSystem));
 
+       state->fSourceAlignMask = fAlignMask;
+       if (fMapper)
+           state->fSourceAlignMask &= page_mask;
+       
        state->fCursor = state->fIterateOnly
                        || (!state->fCheckAddressing
-                           && (!fAlignMask
-                               || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & fAlignMask)))));
+                           && (!state->fSourceAlignMask
+                               || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));
+
        if (!state->fCursor)
        {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
@@ -606,8 +841,49 @@ IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchr
                op |= kWalkSyncOut;
            ret = walkAll(op);
        }
-       if (kIOReturnSuccess == ret)
-           state->fPrepared = true;
+
+       if (IS_NONCOHERENT(mappingOptions) && flushCache) 
+       {
+           if (state->fCopyMD)
+           {
+               state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
+           }
+           else
+           {
+               IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
+               md->performOperation(kIOMemoryIncoherentIOStore, offset, length);
+           }
+       }
+
+       if (fMapper)
+       {
+           IOMDDMAMapArgs mapArgs;
+           bzero(&mapArgs, sizeof(mapArgs));
+           mapArgs.fMapper = fMapper;
+           mapArgs.fCommand = this;
+           mapArgs.fMapSpec.device         = state->fDevice;
+           mapArgs.fMapSpec.alignment      = fAlignMask + 1;
+           mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
+           mapArgs.fLength = state->fPreparedLength;
+           const IOMemoryDescriptor * md = state->fCopyMD;
+           if (md) { mapArgs.fOffset = 0; } else
+           {
+               md = fMemory;
+               mapArgs.fOffset = state->fPreparedOffset;
+           }
+           ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
+//IOLog("dma %p 0x%x 0x%qx-0x%qx 0x%qx-0x%qx\n", this, ret, state->fPreparedOffset, state->fPreparedLength, mapArgs.fAlloc, mapArgs.fAllocLength);
+
+           if (kIOReturnSuccess == ret)
+           {
+               state->fLocalMapperAlloc       = mapArgs.fAlloc;
+               state->fLocalMapperAllocValid  = true;
+               state->fLocalMapperAllocLength = mapArgs.fAllocLength;
+               state->fMapContig = mapArgs.fMapContig;
+           }
+           if (NULL != IOMapper::gSystem) ret = kIOReturnSuccess;
+       }
+       if (kIOReturnSuccess == ret) state->fPrepared = true;
     }
     return ret;
 }
@@ -617,35 +893,81 @@ IODMACommand::complete(bool invalidateCache, bool synchronize)
 {
     IODMACommandInternal * state = fInternalState;
     IOReturn               ret   = kIOReturnSuccess;
+    IOMemoryDescriptor   * copyMD;
 
     if (fActive < 1)
        return kIOReturnNotReady;
 
     if (!--fActive)
     {
+        copyMD = state->fCopyMD;
+       if (copyMD) copyMD->retain();
+
+       if (IS_NONCOHERENT(fMappingOptions) && invalidateCache) 
+       {
+           if (copyMD)
+           {
+               copyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength);
+           }
+           else
+           {
+               IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
+               md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
+           }
+       }
+
        if (!state->fCursor)
        {
-           IOOptionBits op = kWalkComplete;
-           if (synchronize)
-               op |= kWalkSyncIn;
-           ret = walkAll(op);
+               IOOptionBits op = kWalkComplete;
+               if (synchronize)
+                       op |= kWalkSyncIn;
+               ret = walkAll(op);
        }
-       state->fPrepared = false;
 
-#if 0
-       if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
-       { 
-           // XXX gvdl: need invalidate before Chardonnay ships
-           IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);
+       if (state->fLocalMapperAllocValid)
+       {
+           IOMDDMAMapArgs mapArgs;
+           bzero(&mapArgs, sizeof(mapArgs));
+           mapArgs.fMapper = fMapper;
+           mapArgs.fCommand = this;
+           mapArgs.fAlloc = state->fLocalMapperAlloc;
+           mapArgs.fAllocLength = state->fLocalMapperAllocLength;
+           const IOMemoryDescriptor * md = copyMD;
+           if (md) { mapArgs.fOffset = 0; }
+           else
+           {
+               md = fMemory;
+               mapArgs.fOffset = state->fPreparedOffset;
+           }
 
-           poMD->performOperation(kIOMemoryIncoherentIOInvalidate, 0, fMDSummary.fLength);
+           ret = md->dmaCommandOperation(kIOMDDMAUnmap, &mapArgs, sizeof(mapArgs));
+
+           state->fLocalMapperAlloc       = 0;
+           state->fLocalMapperAllocValid  = false;
+           state->fLocalMapperAllocLength = 0;
        }
-#endif
+       if (copyMD) copyMD->release();
+       state->fPrepared = false;
     }
 
     return ret;
 }
 
+IOReturn
+IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
+{
+    IODMACommandInternal * state = fInternalState;
+    if (fActive < 1)
+       return (kIOReturnNotReady);
+
+    if (offset)
+       *offset = state->fPreparedOffset;
+    if (length)
+       *length = state->fPreparedLength;
+
+    return (kIOReturnSuccess);
+}
+
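A sketch of the new accessor: while the command is prepared, a driver can recover the offset and length it passed to prepare(); 'cmd' is assumed from the earlier sketches.

    UInt64 offset, length;
    if (kIOReturnSuccess == cmd->getPreparedOffsetAndLength(&offset, &length))
        IOLog("prepared range: offset 0x%qx length 0x%qx\n", offset, length);
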
 IOReturn
 IODMACommand::synchronize(IOOptionBits options)
 {
@@ -662,14 +984,13 @@ IODMACommand::synchronize(IOOptionBits options)
     op = 0;
     if (kForceDoubleBuffer & options)
     {
-       if (state->fDoubleBuffer)
-           return kIOReturnSuccess;
-       if (state->fCursor)
-           state->fCursor = false;
-       else
-           ret = walkAll(kWalkComplete);
+       if (state->fDoubleBuffer) return kIOReturnSuccess;
+       ret = complete(false /* invalidateCache */, true /* synchronize */);
+       state->fCursor = false;
+       state->fForceDoubleBuffer = true;
+        ret = prepare(state->fPreparedOffset, state->fPreparedLength, false /* flushCache */, true /* synchronize */);
 
-       op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
+       return (ret);
     }
     else if (state->fCursor)
        return kIOReturnSuccess;
@@ -684,22 +1005,115 @@ IODMACommand::synchronize(IOOptionBits options)
     return ret;
 }
 
+struct IODMACommandTransferContext
+{
+    void *   buffer;
+    UInt64   bufferOffset;
+    UInt64   remaining;
+    UInt32   op;
+};
+enum
+{
+    kIODMACommandTransferOpReadBytes  = 1,
+    kIODMACommandTransferOpWriteBytes = 2
+};
+
+IOReturn
+IODMACommand::transferSegment(void   *reference,
+                       IODMACommand *target,
+                       Segment64     segment,
+                       void         *segments,
+                       UInt32        segmentIndex)
+{
+    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
+    UInt64   length  = min(segment.fLength, context->remaining);
+    addr64_t ioAddr  = segment.fIOVMAddr;
+    addr64_t cpuAddr = ioAddr;
+
+    context->remaining -= length;
+
+    while (length)
+    {
+       UInt64 copyLen = length;
+       if ((kMapped == MAPTYPE(target->fMappingOptions))
+           && target->fMapper)
+       {
+           cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr);
+           copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
+           ioAddr += copyLen;
+       }
+
+       switch (context->op)
+       {
+           case kIODMACommandTransferOpReadBytes:
+               copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
+                                   cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
+               break;
+           case kIODMACommandTransferOpWriteBytes:
+               copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
+                               cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
+               break;
+       }
+       length                -= copyLen;
+       context->bufferOffset += copyLen;
+    }
+    
+    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
+}
+
+UInt64
+IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
+{
+    IODMACommandInternal *      state = fInternalState;
+    IODMACommandTransferContext context;
+    Segment64                  segments[1];
+    UInt32                      numSegments = 0-1;
+
+    if (fActive < 1)
+        return (0);
+
+    if (offset >= state->fPreparedLength)
+        return (0);
+    length = min(length, state->fPreparedLength - offset);
+
+    context.buffer       = buffer;
+    context.bufferOffset = 0;
+    context.remaining    = length;
+    context.op           = transferOp;
+    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);
+
+    return (length - context.remaining);
+}
+
+UInt64
+IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
+{
+    return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
+}
+
+UInt64
+IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
+{
+    return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
+}
+
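A sketch of the new byte-copy helpers, which walk the prepared segments through transfer()/transferSegment() and copypv(): with the command prepared, a driver can move bytes between its own buffer and the DMA target. The 64-byte header buffer is illustrative; both calls return the number of bytes actually copied.

    uint8_t header[64] = { 0 };
    UInt64 copied  = cmd->writeBytes(0 /* offset */, header, sizeof(header));
    UInt64 fetched = cmd->readBytes(0 /* offset */, header, sizeof(header));
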
 IOReturn
 IODMACommand::genIOVMSegments(UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
 {
-    return (genIOVMSegments(clientOutputSegment, (void *) kWalkClient, offsetP, segmentsP, numSegmentsP));
+    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
+                           offsetP, segmentsP, numSegmentsP));
 }
 
 IOReturn
-IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
+IODMACommand::genIOVMSegments(uint32_t op,
+                             InternalSegmentFunction outSegFunc,
                              void   *reference,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
 {
-    IOOptionBits           op = (IOOptionBits) reference;
     IODMACommandInternal * internalState = fInternalState;
     IOOptionBits           mdOp = kIOMDWalkSegments;
     IOReturn               ret  = kIOReturnSuccess;
@@ -711,25 +1125,27 @@ IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
        return kIOReturnBadArgument;
 
     IOMDDMAWalkSegmentArgs *state =
-       (IOMDDMAWalkSegmentArgs *) fState;
+       (IOMDDMAWalkSegmentArgs *)(void *) fState;
 
-    UInt64 offset = *offsetP + internalState->fPreparedOffset;
+    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
     UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;
 
     if (offset >= memLength)
        return kIOReturnOverrun;
 
-    if (!offset || offset != state->fOffset) {
-       state->fOffset   = 0;
-       state->fIOVMAddr = 0;
-       state->fMapped = (IS_MAPPED(fMappingOptions) && fMapper);
-       mdOp = kIOMDFirstSegment;
+    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
+       state->fOffset                                   = 0;
+       internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
+       internalState->fNextRemapPage                    = NULL;
+       internalState->fNewMD                            = false;
+       state->fMapped                                   = (0 != fMapper);
+       mdOp                                             = kIOMDFirstSegment;
     };
        
-    UInt64    bypassMask = fBypassMask;
     UInt32    segIndex = 0;
     UInt32    numSegments = *numSegmentsP;
     Segment64 curSeg = { 0, 0 };
+    bool      curSegValid = false;
     addr64_t  maxPhys;
 
     if (fNumAddressBits && (fNumAddressBits < 64))
@@ -738,20 +1154,37 @@ IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
        maxPhys = 0;
     maxPhys--;
 
-    while ((state->fIOVMAddr) || state->fOffset < memLength)
+    while (internalState->fIOVMAddrValid || (state->fOffset < memLength))
     {
-        if (!state->fIOVMAddr) {
+       // state = next seg
+       if (!internalState->fIOVMAddrValid) {
 
            IOReturn rtn;
 
            state->fOffset = offset;
            state->fLength = memLength - offset;
 
-           if (internalState->fCopyContig && (kWalkClient & op))
+           if (internalState->fMapContig && internalState->fLocalMapperAllocValid)
            {
-               state->fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc) 
-                                           + offset - internalState->fPreparedOffset;
+               state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset;
                rtn = kIOReturnSuccess;
+#if 0
+               {
+                   uint64_t checkOffset;
+                   IOPhysicalLength segLen;
+                   for (checkOffset = 0; checkOffset < state->fLength; )
+                   {
+                       addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone);
+                       if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys)
+                       {
+                           panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset, 
+                                   state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength, 
+                                   phys, checkOffset);
+                       }
+                       checkOffset += page_size - (phys & page_mask);
+                   }
+               }
+#endif
            }
            else
            {
@@ -761,114 +1194,185 @@ IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
                mdOp = kIOMDWalkSegments;
            }
 
-           if (rtn == kIOReturnSuccess) {
-               assert(state->fIOVMAddr);
+           if (rtn == kIOReturnSuccess)
+           {
+               internalState->fIOVMAddrValid = true;
                assert(state->fLength);
+               if (curSegValid && ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr)) {
+                   UInt64 length = state->fLength;
+                   offset          += length;
+                   curSeg.fLength  += length;
+                   internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
+               }
            }
            else if (rtn == kIOReturnOverrun)
-               state->fIOVMAddr = state->fLength = 0;  // At end
+               internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0;  // At end
            else
                return rtn;
-        };
-
-        if (!curSeg.fIOVMAddr) {
-           UInt64 length = state->fLength;
-
-            offset          += length;
-            curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
-            curSeg.fLength   = length;
-            state->fIOVMAddr = 0;
-        }
-        else if ((curSeg.fIOVMAddr + curSeg.fLength == state->fIOVMAddr)) {
-           UInt64 length = state->fLength;
-            offset          += length;
-            curSeg.fLength  += length;
-            state->fIOVMAddr = 0;
-        };
+       }
 
+       // seg = state, offset = end of seg
+       if (!curSegValid)
+       {
+           UInt64 length                 = state->fLength;
+           offset                       += length;
+           curSeg.fIOVMAddr              = state->fIOVMAddr;
+           curSeg.fLength                = length;
+           curSegValid                   = true;
+           internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
+       }
 
-        if (!state->fIOVMAddr)
+        if (!internalState->fIOVMAddrValid)
        {
-           if (kWalkClient & op)
+           // maxPhys
+           if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
            {
-               if ((curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
+               if (internalState->fCursor)
+               {
+                   curSegValid = curSeg.fIOVMAddr = 0;
+                   ret = kIOReturnMessageTooLarge;
+                   break;
+               }
+               else if (curSeg.fIOVMAddr <= maxPhys)
+               {
+                   UInt64 remain, newLength;
+
+                   newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
+                   DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
+                   remain           = curSeg.fLength - newLength;
+                   state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
+                   internalState->fIOVMAddrValid = true;
+                   curSeg.fLength   = newLength;
+                   state->fLength   = remain;
+                   offset          -= remain;
+               }
+               else 
                {
-                   if (internalState->fCursor)
+                   UInt64    addr = curSeg.fIOVMAddr;
+                   ppnum_t   addrPage = atop_64(addr);
+                   vm_page_t remap = NULL;
+                   UInt64    remain, newLength;
+
+                   DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);
+
+                   remap = internalState->fNextRemapPage;
+                   if (remap && (addrPage == vm_page_get_offset(remap)))
                    {
-                       curSeg.fIOVMAddr = 0;
-                       ret = kIOReturnMessageTooLarge;
-                       break;
                    }
-                   else if (curSeg.fIOVMAddr <= maxPhys)
+                   else for (remap = internalState->fCopyPageAlloc; 
+                               remap && (addrPage != vm_page_get_offset(remap));
+                               remap = vm_page_get_next(remap))
                    {
-                       UInt64 remain, newLength;
-
-                       newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
-                       DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
-                       remain = curSeg.fLength - newLength;
-                       state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
-                       curSeg.fLength   = newLength;
-                       state->fLength   = remain;
-                       offset          -= remain;
                    }
-                   else if (gIOCopyMapper)
+
+                   if (!remap) panic("no remap page found");
+
+                   curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
+                                       + (addr & PAGE_MASK);
+                   curSegValid = true;
+                   internalState->fNextRemapPage = vm_page_get_next(remap);
+
+                   newLength            = PAGE_SIZE - (addr & PAGE_MASK);
+                   if (newLength < curSeg.fLength)
                    {
-                       DEBG("sparse switch %qx, %qx ", curSeg.fIOVMAddr, curSeg.fLength);
-                       // Cache this!
-                       for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
-                       {
-                           if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
-                                                           ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)))
-                           {
-                               curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex) + (curSeg.fIOVMAddr & PAGE_MASK);
-                               break;
-                           }
-                       }
-                       DEBG("-> %qx, %qx\n", curSeg.fIOVMAddr, curSeg.fLength);
+                       remain           = curSeg.fLength - newLength;
+                       state->fIOVMAddr = addr + newLength;
+                       internalState->fIOVMAddrValid = true;
+                       curSeg.fLength   = newLength;
+                       state->fLength   = remain;
+                       offset          -= remain;
                    }
+                   DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
                }
            }
 
+           // reduce size of output segment
+           uint64_t reduce, leftover = 0;
+
+           // fMaxSegmentSize
            if (curSeg.fLength > fMaxSegmentSize)
            {
-               UInt64 remain = curSeg.fLength - fMaxSegmentSize;
+               leftover      += curSeg.fLength - fMaxSegmentSize;
+               curSeg.fLength = fMaxSegmentSize;
+               state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
+               internalState->fIOVMAddrValid = true;
+           }
+
+           // alignment current length
+
+           reduce = (curSeg.fLength & fAlignMaskLength);
+           if (reduce && (curSeg.fLength > reduce)) 
+           {
+               leftover       += reduce;
+               curSeg.fLength -= reduce;
+               state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
+               internalState->fIOVMAddrValid = true;
+           }
 
-               state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
-               curSeg.fLength   = fMaxSegmentSize;
+           // alignment next address
 
-               state->fLength   = remain;
-               offset          -= remain;
+           reduce = (state->fIOVMAddr & fAlignMaskInternalSegments);
+           if (reduce && (curSeg.fLength > reduce))
+           {
+               leftover       += reduce;
+               curSeg.fLength -= reduce;
+               state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
+               internalState->fIOVMAddrValid = true;
            }
 
-           if (internalState->fCursor
-               && (0 != (fAlignMask & curSeg.fIOVMAddr)))
+           if (leftover)
            {
-               curSeg.fIOVMAddr = 0;
-               ret = kIOReturnNotAligned;
-               break;
+               DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n", 
+                     leftover, offset,
+                     curSeg.fIOVMAddr, curSeg.fLength);
+               state->fLength   = leftover;
+               offset          -= leftover;
+           }
+
+           // 
+
+           if (internalState->fCursor)
+           {
+               bool misaligned;
+               uint32_t mask;
+
+               mask = (segIndex ? fAlignMaskInternalSegments : internalState->fSourceAlignMask);
+               misaligned = (0 != (mask & curSeg.fIOVMAddr));
+               if (!misaligned)
+               {
+                   mask = fAlignMaskLength;
+                   misaligned |= (0 != (mask &  curSeg.fLength));
+               }
+               if (misaligned)
+               {
+                   if (misaligned) DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength);
+                   curSegValid = curSeg.fIOVMAddr = 0;
+                   ret = kIOReturnNotAligned;
+                   break;
+               }
            }
 
            if (offset >= memLength)
            {
                curSeg.fLength   -= (offset - memLength);
                offset = memLength;
-               state->fIOVMAddr = state->fLength = 0;  // At end
+               internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0;  // At end
                break;
            }
        }
 
-        if (state->fIOVMAddr) {
+        if (internalState->fIOVMAddrValid) {
             if ((segIndex + 1 == numSegments))
                 break;
 
            ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
-            curSeg.fIOVMAddr = 0;
+            curSegValid = curSeg.fIOVMAddr = 0;
            if (kIOReturnSuccess != ret)
                break;
         }
     }
 
-    if (curSeg.fIOVMAddr) {
+    if (curSegValid) {
        ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
     }
 
@@ -886,16 +1390,18 @@ IODMACommand::clientOutputSegment(
        void *reference, IODMACommand *target,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
 {
+    SegmentFunction segmentFunction = (SegmentFunction) reference;
     IOReturn ret = kIOReturnSuccess;
 
-    if ((target->fNumAddressBits < 64) 
-       && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits))
+    if (target->fNumAddressBits && (target->fNumAddressBits < 64) 
+       && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
+       && (target->reserved->fLocalMapperAllocValid || !target->fMapper))
     {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
     }
 
-    if (!(*target->fOutSeg)(target, segment, vSegList, outSegIndex))
+    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
     {
        DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
@@ -904,6 +1410,16 @@ IODMACommand::clientOutputSegment(
     return (ret);
 }
 
+IOReturn
+IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
+                                   UInt64   *offsetP,
+                                   void     *segmentsP,
+                                   UInt32   *numSegmentsP)
+{
+    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
+                           offsetP, segmentsP, numSegmentsP));
+}
+
 bool 
 IODMACommand::OutputHost32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)