+#endif /* !__LP64__ */
+
+/*
+ * initWithOptions:
+ *
+ * IOMemoryDescriptor. The buffer is made up of several virtual address ranges
+ * from a given task, several physical ranges, a UPL from the ubc
+ * system, or a uio (may be 64-bit) from the BSD subsystem.
+ *
+ * Passing the ranges as a reference will avoid an extra allocation.
+ *
+ * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
+ * existing instance -- note this behavior is not commonly supported in other
+ * I/O Kit classes, although it is supported here.
+ */
+
+bool
+IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
+ UInt32 count,
+ UInt32 offset,
+ task_t task,
+ IOOptionBits options,
+ IOMapper * mapper)
+{
+ IOOptionBits type = options & kIOMemoryTypeMask;
+
+#ifndef __LP64__
+ // A 32-bit IOVirtualRange cannot describe an address in a 64-bit task;
+ // such callers must use ::withAddressRange() (IOAddressRange) instead.
+ if (task
+ && (kIOMemoryTypeVirtual == type)
+ && vm_map_is_64bit(get_task_map(task))
+ && ((IOVirtualRange *) buffers)->address)
+ {
+ OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
+ return false;
+ }
+#endif /* !__LP64__ */
+
+ // Grab the original MD's configuration data to initialise the
+ // arguments to this function.
+ if (kIOMemoryTypePersistentMD == type) {
+
+ IOMDPersistentInitData *initData = (typeof(initData)) buffers;
+ const IOGeneralMemoryDescriptor *orig = initData->fMD;
+ ioGMDData *dataP = getDataP(orig->_memoryEntries);
+
+ // Only accept persistent memory descriptors with valid dataP data.
+ assert(orig->_rangesCount == 1);
+ if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
+ return false;
+
+ _memRef = initData->fMemRef; // Grab the new named entry
+ options = orig->_flags & ~kIOMemoryAsReference;
+ type = options & kIOMemoryTypeMask;
+ buffers = orig->_ranges.v;
+ count = orig->_rangesCount;
+
+ // Now grab the original task and whatever mapper was previously used
+ task = orig->_task;
+ mapper = dataP->fMapper;
+
+ // We are ready to go through the original initialisation now
+ }
+
+ // Sanity-check the type/task pairing: virtual-style ranges need a task,
+ // physical and UPL descriptors must not carry one.
+ switch (type) {
+ case kIOMemoryTypeUIO:
+ case kIOMemoryTypeVirtual:
+#ifndef __LP64__
+ case kIOMemoryTypeVirtual64:
+#endif /* !__LP64__ */
+ assert(task);
+ if (!task)
+ return false;
+ break;
+
+ case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
+#ifndef __LP64__
+ case kIOMemoryTypePhysical64:
+#endif /* !__LP64__ */
+ case kIOMemoryTypeUPL:
+ assert(!task);
+ break;
+ default:
+ return false; /* bad argument */
+ }
+
+ assert(buffers);
+ assert(count);
+
+ /*
+ * We can check the _initialized instance variable before having ever set
+ * it to an initial value because I/O Kit guarantees that all our instance
+ * variables are zeroed on an object's allocation.
+ */
+
+ if (_initialized) {
+ /*
+ * An existing memory descriptor is being retargeted to point to
+ * somewhere else. Clean up our present state.
+ */
+ // Note: this 'type' intentionally shadows the parameter-derived one
+ // above; it describes the descriptor's PREVIOUS contents.
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+ if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
+ {
+ while (_wireCount)
+ complete();
+ }
+ // Free any range storage we allocated on the previous init.
+ if (_ranges.v && !(kIOMemoryAsReference & _flags))
+ {
+ if (kIOMemoryTypeUIO == type)
+ uio_free((uio_t) _ranges.v);
+#ifndef __LP64__
+ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
+ IODelete(_ranges.v64, IOAddressRange, _rangesCount);
+#endif /* !__LP64__ */
+ else
+ IODelete(_ranges.v, IOVirtualRange, _rangesCount);
+ }
+
+ // Carry the redirect state forward across the re-init.
+ options |= (kIOMemoryRedirected & _flags);
+ if (!(kIOMemoryRedirected & options))
+ {
+ if (_memRef)
+ {
+ memoryReferenceRelease(_memRef);
+ _memRef = 0;
+ }
+ if (_mappings)
+ _mappings->flushCollection();
+ }
+ }
+ else {
+ if (!super::init())
+ return false;
+ _initialized = true;
+ }
+
+ // Grab the appropriate mapper
+ if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
+ if (kIOMemoryMapperNone & options)
+ mapper = 0; // No Mapper
+ else if (mapper == kIOMapperSystem) {
+ IOMapper::checkForSystemMapper();
+ gIOSystemMapper = mapper = IOMapper::gSystem;
+ }
+
+ // Temp binary compatibility for kIOMemoryThreadSafe
+ if (kIOMemoryReserved6156215 & options)
+ {
+ options &= ~kIOMemoryReserved6156215;
+ options |= kIOMemoryThreadSafe;
+ }
+ // Remove the dynamic internal use flags from the initial setting
+ options &= ~(kIOMemoryPreparedReadOnly);
+ _flags = options;
+ _task = task;
+
+#ifndef __LP64__
+ _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
+#endif /* !__LP64__ */
+
+ __iomd_reservedA = 0;
+ __iomd_reservedB = 0;
+ _highestPage = 0;
+
+ // Allocate or free the prepare lock so it matches the thread-safe option
+ // chosen for this (re-)initialisation.
+ if (kIOMemoryThreadSafe & options)
+ {
+ if (!_prepareLock)
+ _prepareLock = IOLockAlloc();
+ }
+ else if (_prepareLock)
+ {
+ IOLockFree(_prepareLock);
+ _prepareLock = NULL;
+ }
+
+ if (kIOMemoryTypeUPL == type) {
+
+ ioGMDData *dataP;
+ unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
+
+ if (!initMemoryEntries(dataSize, mapper)) return (false);
+ dataP = getDataP(_memoryEntries);
+ dataP->fPageCnt = 0;
+
+ // _wireCount++; // UPLs start out life wired
+
+ _length = count;
+ _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
+
+ ioPLBlock iopl;
+ iopl.fIOPL = (upl_t) buffers;
+ upl_set_referenced(iopl.fIOPL, true);
+ upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
+
+ // The externally supplied UPL must cover the whole described region.
+ if (upl_get_size(iopl.fIOPL) < (count + offset))
+ panic("short external upl");
+
+ _highestPage = upl_get_highest_page(iopl.fIOPL);
+
+ // Set the flag kIOPLOnDevice conveniently equal to 1
+ iopl.fFlags = pageList->device | kIOPLExternUPL;
+ if (!pageList->device) {
+ // Pre-compute the offset into the UPL's page list
+ pageList = &pageList[atop_32(offset)];
+ offset &= PAGE_MASK;
+ }
+ iopl.fIOMDOffset = 0;
+ iopl.fMappedPage = 0;
+ iopl.fPageInfo = (vm_address_t) pageList;
+ iopl.fPageOffset = offset;
+ _memoryEntries->appendBytes(&iopl, sizeof(iopl));
+ }
+ else {
+ // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
+ // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
+
+ // Initialize the memory descriptor
+ if (options & kIOMemoryAsReference) {
+#ifndef __LP64__
+ _rangesIsAllocated = false;
+#endif /* !__LP64__ */
+
+ // Hack assignment to get the buffer arg into _ranges.
+ // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
+ // work, C++ sigh.
+ // This also initialises the uio & physical ranges.
+ _ranges.v = (IOVirtualRange *) buffers;
+ }
+ else {
+#ifndef __LP64__
+ _rangesIsAllocated = true;
+#endif /* !__LP64__ */
+ switch (type)
+ {
+ case kIOMemoryTypeUIO:
+ _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
+ break;
+
+#ifndef __LP64__
+ case kIOMemoryTypeVirtual64:
+ case kIOMemoryTypePhysical64:
+ // A single 64-bit range lying entirely below 4GB is demoted to
+ // the equivalent 32-bit type and kept inline as a reference.
+ if (count == 1
+ && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
+ ) {
+ if (kIOMemoryTypeVirtual64 == type)
+ type = kIOMemoryTypeVirtual;
+ else
+ type = kIOMemoryTypePhysical;
+ _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
+ _rangesIsAllocated = false;
+ _ranges.v = &_singleRange.v;
+ _singleRange.v.address = ((IOAddressRange *) buffers)->address;
+ _singleRange.v.length = ((IOAddressRange *) buffers)->length;
+ break;
+ }
+ _ranges.v64 = IONew(IOAddressRange, count);
+ if (!_ranges.v64)
+ return false;
+ bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
+ break;
+#endif /* !__LP64__ */
+ case kIOMemoryTypeVirtual:
+ case kIOMemoryTypePhysical:
+ if (count == 1) {
+ // Single range: use the inline storage, no allocation needed.
+ _flags |= kIOMemoryAsReference;
+#ifndef __LP64__
+ _rangesIsAllocated = false;
+#endif /* !__LP64__ */
+ _ranges.v = &_singleRange.v;
+ } else {
+ _ranges.v = IONew(IOVirtualRange, count);
+ if (!_ranges.v)
+ return false;
+ }
+ bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
+ break;
+ }
+ }
+
+ // Find starting address within the vector of ranges
+ Ranges vec = _ranges;
+ mach_vm_size_t totalLength = 0;
+ unsigned int ind, pages = 0;
+ for (ind = 0; ind < count; ind++) {
+ mach_vm_address_t addr;
+ mach_vm_size_t len;
+
+ // addr & len are returned by this function
+ getAddrLenForInd(addr, len, type, vec, ind);
+ if ((addr + len + PAGE_MASK) < addr) break; /* overflow */
+ pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
+ totalLength += len;
+ if (totalLength < len) break; /* overflow */
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
+ {
+ ppnum_t highPage = atop_64(addr + len - 1);
+ if (highPage > _highestPage)
+ _highestPage = highPage;
+ }
+ }
+ // An early break above, or a total that does not round-trip through
+ // IOByteCount, means an overflow was detected.
+ if ((ind < count)
+ || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */
+
+ _length = totalLength;
+ _pages = pages;
+ _rangesCount = count;
+
+ // Auto-prepare memory at creation time.
+ // Implied completion when descriptor is free-ed
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
+ _wireCount++; // Physical MDs are, by definition, wired
+ else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
+ ioGMDData *dataP;
+ unsigned dataSize;
+
+ // Refuse descriptors describing more pages than physical memory.
+ if (_pages > atop_64(max_mem)) return false;
+
+ dataSize = computeDataSize(_pages, /* upls */ count * 2);
+ if (!initMemoryEntries(dataSize, mapper)) return false;
+ dataP = getDataP(_memoryEntries);
+ dataP->fPageCnt = _pages;
+
+ if ( (kIOMemoryPersistent & _flags) && !_memRef)
+ {
+ IOReturn
+ err = memoryReferenceCreate(0, &_memRef);
+ if (kIOReturnSuccess != err) return false;
+ }
+
+ if ((_flags & kIOMemoryAutoPrepare)
+ && prepare() != kIOReturnSuccess)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * free
+ *
+ * Free resources.
+ */
+void IOGeneralMemoryDescriptor::free()
+{
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+
+ // Clear the device pager's back reference to this object under LOCK
+ // (presumably so pager paths cannot reach a dying descriptor --
+ // NOTE(review): confirm against the device pager code).
+ if( reserved)
+ {
+ LOCK;
+ reserved->dp.memory = 0;
+ UNLOCK;
+ }
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
+ {
+ // Physical descriptors: tear down any outstanding mapper allocation.
+ ioGMDData * dataP;
+ if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
+ {
+ dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength);
+ dataP->fMappedBase = 0;
+ }
+ }
+ else
+ {
+ // Balance any outstanding prepare() calls before freeing.
+ while (_wireCount) complete();
+ }
+
+ if (_memoryEntries) _memoryEntries->release();
+
+ // Free the range storage unless it was passed in by reference.
+ if (_ranges.v && !(kIOMemoryAsReference & _flags))
+ {
+ if (kIOMemoryTypeUIO == type)
+ uio_free((uio_t) _ranges.v);
+#ifndef __LP64__
+ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
+ IODelete(_ranges.v64, IOAddressRange, _rangesCount);
+#endif /* !__LP64__ */
+ else
+ IODelete(_ranges.v, IOVirtualRange, _rangesCount);
+
+ _ranges.v = NULL;
+ }
+
+ if (reserved)
+ {
+ if (reserved->dp.devicePager)
+ {
+ // memEntry holds a ref on the device pager which owns reserved
+ // (IOMemoryDescriptorReserved) so no reserved access after this point
+ device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
+ }
+ else
+ IODelete(reserved, IOMemoryDescriptorReserved, 1);
+ reserved = NULL;
+ }
+
+ if (_memRef) memoryReferenceRelease(_memRef);
+ if (_prepareLock) IOLockFree(_prepareLock);
+
+ super::free();
+}
+
+#ifndef __LP64__
+void IOGeneralMemoryDescriptor::unmapFromKernel()
+{
+ // Deprecated legacy (pre-LP64) entry point; unconditionally panics.
+ panic("IOGMD::unmapFromKernel deprecated");
+}
+
+void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
+{
+ // Deprecated legacy (pre-LP64) entry point; unconditionally panics.
+ panic("IOGMD::mapIntoKernel deprecated");
+}
+#endif /* !__LP64__ */
+
+/*
+ * getDirection:
+ *
+ * Get the direction of the transfer.
+ */
+IODirection IOMemoryDescriptor::getDirection() const
+{
+#ifndef __LP64__
+ // The legacy 32-bit ivar, when non-zero, takes precedence over the
+ // direction bits stored in _flags.
+ if (_direction)
+ {
+ return _direction;
+ }
+#endif /* !__LP64__ */
+ // Otherwise derive the direction from the option flags.
+ return ((IODirection) (kIOMemoryDirectionMask & _flags));
+}
+
+/*
+ * getLength:
+ *
+ * Get the length of the transfer (over all ranges).
+ */
+IOByteCount IOMemoryDescriptor::getLength() const
+{
+ // Total byte count over all ranges, computed at initialisation.
+ return (_length);
+}
+
+void IOMemoryDescriptor::setTag( IOOptionBits tag )
+{
+ // Store the client-supplied tag; no other state is touched.
+ _tag = tag;
+}
+
+IOOptionBits IOMemoryDescriptor::getTag( void )
+{
+ // Return the tag previously stored by setTag().
+ return _tag;
+}
+
+#ifndef __LP64__
+// @@@ gvdl: who is using this API? Seems like a wierd thing to implement.
+IOPhysicalAddress
+IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
+{
+ addr64_t physAddr = 0;
+
+ // Wire the memory just long enough to resolve the segment address.
+ if (kIOReturnSuccess == prepare())
+ {
+ physAddr = getPhysicalSegment64( offset, length );
+ complete();
+ }
+
+ // Truncation to IOPhysicalAddress is acceptable: only the page offset
+ // portion is used by callers.
+ return ((IOPhysicalAddress) physAddr);
+}
+#endif /* !__LP64__ */
+
+IOByteCount IOMemoryDescriptor::readBytes
+ (IOByteCount offset, void *bytes, IOByteCount length)
+{
+ addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
+ IOByteCount remaining;
+
+ // Assert that this entire I/O is within the available range
+ assert(offset <= _length);
+ assert(offset + length <= _length);
+ if ((offset >= _length)
+ || ((offset + length) > _length)) {
+ return 0;
+ }
+
+ if (kIOMemoryThreadSafe & _flags)
+ LOCK;
+
+ // Walk the descriptor one physical segment at a time, copying each
+ // segment into the caller's buffer via copypv().
+ remaining = length = min(length, _length - offset);
+ while (remaining) { // (process another target segment?)
+ addr64_t srcAddr64;
+ IOByteCount srcLen;
+
+ // A zero segment address terminates the walk early (short read).
+ srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
+ if (!srcAddr64)
+ break;
+
+ // Clip segment length to remaining
+ if (srcLen > remaining)
+ srcLen = remaining;
+
+ copypv(srcAddr64, dstAddr, srcLen,
+ cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
+
+ dstAddr += srcLen;
+ offset += srcLen;
+ remaining -= srcLen;
+ }
+
+ if (kIOMemoryThreadSafe & _flags)
+ UNLOCK;
+
+ assert(!remaining);
+
+ // Number of bytes actually copied (short only on an early break).
+ return length - remaining;
+}
+
+IOByteCount IOMemoryDescriptor::writeBytes
+ (IOByteCount inoffset, const void *bytes, IOByteCount length)
+{
+ addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
+ IOByteCount remaining;
+ IOByteCount offset = inoffset;
+
+ // Assert that this entire I/O is within the available range
+ assert(offset <= _length);
+ assert(offset + length <= _length);
+
+ assert( !(kIOMemoryPreparedReadOnly & _flags) );
+
+ if ( (kIOMemoryPreparedReadOnly & _flags)
+ || (offset >= _length)
+ || ((offset + length) > _length)) {
+ return 0;
+ }
+
+ if (kIOMemoryThreadSafe & _flags)
+ LOCK;
+
+ // Walk the descriptor one physical segment at a time. A NULL 'bytes'
+ // pointer means zero-fill the target instead of copying.
+ remaining = length = min(length, _length - offset);
+ while (remaining) { // (process another target segment?)
+ addr64_t dstAddr64;
+ IOByteCount dstLen;
+
+ // A zero segment address terminates the walk early (short write).
+ dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
+ if (!dstAddr64)
+ break;
+
+ // Clip segment length to remaining
+ if (dstLen > remaining)
+ dstLen = remaining;
+
+ if (!srcAddr) bzero_phys(dstAddr64, dstLen);
+ else
+ {
+ copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
+ cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
+ srcAddr += dstLen;
+ }
+ offset += dstLen;
+ remaining -= dstLen;
+ }
+
+ if (kIOMemoryThreadSafe & _flags)
+ UNLOCK;
+
+ assert(!remaining);
+
+ // After a zero-fill, flush the written range for incoherent I/O.
+ if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
+
+ return length - remaining;
+}
+
+#ifndef __LP64__
+void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
+{
+ // Deprecated legacy (pre-LP64) entry point; unconditionally panics.
+ panic("IOGMD::setPosition deprecated");
+}
+#endif /* !__LP64__ */
+
+// Global counter used to mint preparation IDs (seeded above 2^32);
+// advanced via OSIncrementAtomic64 by the getPreparationID/setPreparationID
+// methods below.
+static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
+
+uint64_t
+IOGeneralMemoryDescriptor::getPreparationID( void )
+{
+ ioGMDData *dataP;
+
+ // Only prepared (wired) descriptors carry a preparation ID.
+ if (0 == _wireCount)
+ return (kIOPreparationIDUnprepared);
+
+ IOOptionBits type = (_flags & kIOMemoryTypeMask);
+ if ((kIOMemoryTypePhysical == type)
+ || (kIOMemoryTypePhysical64 == type))
+ {
+ // Physical descriptors keep their ID in the superclass reserved area.
+ IOMemoryDescriptor::setPreparationID();
+ return (IOMemoryDescriptor::getPreparationID());
+ }
+
+ if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
+ return (kIOPreparationIDUnprepared);
+
+ // Mint a fresh ID on the first request after prepare().
+ if (dataP->fPreparationID == kIOPreparationIDUnprepared)
+ dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
+
+ return (dataP->fPreparationID);
+}
+
+IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
+{
+ // Lazily allocate (and zero) the expansion data on first use.
+ if (reserved)
+ return (reserved);
+
+ reserved = IONew(IOMemoryDescriptorReserved, 1);
+ if (reserved)
+ bzero(reserved, sizeof(IOMemoryDescriptorReserved));
+ // May still be NULL on allocation failure; callers must check.
+ return (reserved);
+}
+
+void IOMemoryDescriptor::setPreparationID( void )
+{
+ // Ensure the reserved area exists; bail if allocation failed.
+ if (!getKernelReserved())
+ return;
+ // Assign an ID only once.
+ if (reserved->preparationID != kIOPreparationIDUnprepared)
+ return;
+#if defined(__ppc__ )
+ reserved->preparationID = gIOMDPreparationID++;
+#else
+ reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
+#endif
+}
+
+uint64_t IOMemoryDescriptor::getPreparationID( void )
+{
+ // Without reserved storage, no ID was ever assigned.
+ return (reserved ? reserved->preparationID : kIOPreparationIDUnsupported);
+}
+
+IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
+{
+ IOReturn err = kIOReturnSuccess;
+ DMACommandOps params;
+ IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
+ ioGMDData *dataP;
+
+ params = (op & ~kIOMDDMACommandOperationMask & op);
+ op &= kIOMDDMACommandOperationMask;
+
+ if (kIOMDDMAMap == op)
+ {
+ if (dataSize < sizeof(IOMDDMAMapArgs))
+ return kIOReturnUnderrun;
+
+ IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
+
+ if (!_memoryEntries
+ && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
+
+ if (_memoryEntries && data->fMapper)
+ {
+ bool remap, keepMap;
+ dataP = getDataP(_memoryEntries);
+
+ if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
+ if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment;
+
+ keepMap = (data->fMapper == gIOSystemMapper);
+ keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
+
+ remap = (!keepMap);
+ remap |= (dataP->fDMAMapNumAddressBits < 64)
+ && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
+ remap |= (dataP->fDMAMapAlignment > page_size);
+
+ if (remap || !dataP->fMappedBase)
+ {
+// if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
+ err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
+ if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBase)
+ {
+ dataP->fMappedBase = data->fAlloc;
+ dataP->fMappedLength = data->fAllocLength;
+ data->fAllocLength = 0; // IOMD owns the alloc now
+ }
+ }
+ else
+ {
+ data->fAlloc = dataP->fMappedBase;
+ data->fAllocLength = 0; // give out IOMD map
+ }
+ data->fMapContig = !dataP->fDiscontig;
+ }
+
+ return (err);
+ }
+
+ if (kIOMDAddDMAMapSpec == op)
+ {
+ if (dataSize < sizeof(IODMAMapSpecification))
+ return kIOReturnUnderrun;
+
+ IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
+
+ if (!_memoryEntries
+ && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
+
+ if (_memoryEntries)
+ {
+ dataP = getDataP(_memoryEntries);
+ if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
+ dataP->fDMAMapNumAddressBits = data->numAddressBits;
+ if (data->alignment > dataP->fDMAMapAlignment)
+ dataP->fDMAMapAlignment = data->alignment;
+ }
+ return kIOReturnSuccess;
+ }
+
+ if (kIOMDGetCharacteristics == op) {
+
+ if (dataSize < sizeof(IOMDDMACharacteristics))
+ return kIOReturnUnderrun;
+
+ IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
+ data->fLength = _length;
+ data->fSGCount = _rangesCount;
+ data->fPages = _pages;
+ data->fDirection = getDirection();
+ if (!_wireCount)
+ data->fIsPrepared = false;
+ else {
+ data->fIsPrepared = true;
+ data->fHighestPage = _highestPage;
+ if (_memoryEntries)
+ {
+ dataP = getDataP(_memoryEntries);
+ ioPLBlock *ioplList = getIOPLList(dataP);
+ UInt count = getNumIOPL(_memoryEntries, dataP);
+ if (count == 1)
+ data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
+ }
+ }