+bool
+IOGeneralMemoryDescriptor::initWithAddress(void * address,
+ IOByteCount withLength,
+ IODirection withDirection)
+{
+ _singleRange.v.address = (vm_offset_t) address;
+ _singleRange.v.length = withLength;
+
+ return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
+}
+
+bool
+IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
+ IOByteCount withLength,
+ IODirection withDirection,
+ task_t withTask)
+{
+ _singleRange.v.address = address;
+ _singleRange.v.length = withLength;
+
+ return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
+}
+
+bool
+IOGeneralMemoryDescriptor::initWithPhysicalAddress(
+ IOPhysicalAddress address,
+ IOByteCount withLength,
+ IODirection withDirection )
+{
+ _singleRange.p.address = address;
+ _singleRange.p.length = withLength;
+
+ return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
+}
+
+bool
+IOGeneralMemoryDescriptor::initWithPhysicalRanges(
+ IOPhysicalRange * ranges,
+ UInt32 count,
+ IODirection direction,
+ bool reference)
+{
+ IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
+
+ if (reference) {
+ mdOpts |= kIOMemoryAsReference;
+ }
+
+ return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
+}
+
+bool
+IOGeneralMemoryDescriptor::initWithRanges(
+ IOVirtualRange * ranges,
+ UInt32 count,
+ IODirection direction,
+ task_t task,
+ bool reference)
+{
+ IOOptionBits mdOpts = direction;
+
+ if (reference) {
+ mdOpts |= kIOMemoryAsReference;
+ }
+
+ if (task) {
+ mdOpts |= kIOMemoryTypeVirtual;
+
+ // Auto-prepare if this is a kernel memory descriptor, as very few
+ // clients bother to prepare() kernel memory. Preparation was never
+ // enforced for kernel memory, so prepare it automatically here.
+ if (task == kernel_task) {
+ mdOpts |= kIOMemoryAutoPrepare;
+ }
+ } else {
+ mdOpts |= kIOMemoryTypePhysical;
+ }
+
+ return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
+}
+#endif /* !__LP64__ */
+
+/*
+ * initWithOptions:
+ *
+ * Initialise an IOMemoryDescriptor. The buffer is made up of several virtual
+ * address ranges from a given task, several physical ranges, a UPL from the
+ * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
+ *
+ * Passing the ranges as a reference will avoid an extra allocation.
+ *
+ * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
+ * existing instance -- note this behavior is not commonly supported in other
+ * I/O Kit classes, although it is supported here.
+ */
+
+bool
+IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
+ UInt32 count,
+ UInt32 offset,
+ task_t task,
+ IOOptionBits options,
+ IOMapper * mapper)
+{
+ IOOptionBits type = options & kIOMemoryTypeMask;
+
+#ifndef __LP64__
+ if (task
+ && (kIOMemoryTypeVirtual == type)
+ && vm_map_is_64bit(get_task_map(task))
+ && ((IOVirtualRange *) buffers)->address) {
+ OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
+ return false;
+ }
+#endif /* !__LP64__ */
+
+ // Grab the original MD's configuration data to initialise the
+ // arguments to this function.
+ if (kIOMemoryTypePersistentMD == type) {
+ IOMDPersistentInitData *initData = (typeof(initData))buffers;
+ const IOGeneralMemoryDescriptor *orig = initData->fMD;
+ ioGMDData *dataP = getDataP(orig->_memoryEntries);
+
+ // Only accept persistent memory descriptors with valid dataP data.
+ assert(orig->_rangesCount == 1);
+ if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
+ return false;
+ }
+
+ _memRef = initData->fMemRef; // Grab the new named entry
+ options = orig->_flags & ~kIOMemoryAsReference;
+ type = options & kIOMemoryTypeMask;
+ buffers = orig->_ranges.v;
+ count = orig->_rangesCount;
+
+ // Now grab the original task and whatever mapper was previously used
+ task = orig->_task;
+ mapper = dataP->fMapper;
+
+ // We are ready to go through the original initialisation now
+ }
+
+ switch (type) {
+ case kIOMemoryTypeUIO:
+ case kIOMemoryTypeVirtual:
+#ifndef __LP64__
+ case kIOMemoryTypeVirtual64:
+#endif /* !__LP64__ */
+ assert(task);
+ if (!task) {
+ return false;
+ }
+ break;
+
+ case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
+#ifndef __LP64__
+ case kIOMemoryTypePhysical64:
+#endif /* !__LP64__ */
+ case kIOMemoryTypeUPL:
+ assert(!task);
+ break;
+ default:
+ return false; /* bad argument */
+ }
+
+ assert(buffers);
+ assert(count);
+
+ /*
+ * We can check the _initialized instance variable before having ever set
+ * it to an initial value because I/O Kit guarantees that all our instance
+ * variables are zeroed on an object's allocation.
+ */
+
+ if (_initialized) {
+ /*
+ * An existing memory descriptor is being retargeted to point to
+ * somewhere else. Clean up our present state.
+ */
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+ if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
+ while (_wireCount) {
+ complete();
+ }
+ }
+ if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
+ if (kIOMemoryTypeUIO == type) {
+ uio_free((uio_t) _ranges.v);
+ }
+#ifndef __LP64__
+ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
+ IODelete(_ranges.v64, IOAddressRange, _rangesCount);
+ }
+#endif /* !__LP64__ */
+ else {
+ IODelete(_ranges.v, IOVirtualRange, _rangesCount);
+ }
+ }
+
+ options |= (kIOMemoryRedirected & _flags);
+ if (!(kIOMemoryRedirected & options)) {
+ if (_memRef) {
+ memoryReferenceRelease(_memRef);
+ _memRef = NULL;
+ }
+ if (_mappings) {
+ _mappings->flushCollection();
+ }
+ }
+ } else {
+ if (!super::init()) {
+ return false;
+ }
+ _initialized = true;
+ }
+
+ // Grab the appropriate mapper
+ if (kIOMemoryHostOrRemote & options) {
+ options |= kIOMemoryMapperNone;
+ }
+ if (kIOMemoryMapperNone & options) {
+ mapper = NULL; // No Mapper
+ } else if (mapper == kIOMapperSystem) {
+ IOMapper::checkForSystemMapper();
+ gIOSystemMapper = mapper = IOMapper::gSystem;
+ }
+
+ // Remove the dynamic internal use flags from the initial setting
+ options &= ~(kIOMemoryPreparedReadOnly);
+ _flags = options;
+ _task = task;
+
+#ifndef __LP64__
+ _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
+#endif /* !__LP64__ */
+
+ _dmaReferences = 0;
+ __iomd_reservedA = 0;
+ __iomd_reservedB = 0;
+ _highestPage = 0;
+
+ if (kIOMemoryThreadSafe & options) {
+ if (!_prepareLock) {
+ _prepareLock = IOLockAlloc();
+ }
+ } else if (_prepareLock) {
+ IOLockFree(_prepareLock);
+ _prepareLock = NULL;
+ }
+
+ if (kIOMemoryTypeUPL == type) {
+ ioGMDData *dataP;
+ unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
+
+ if (!initMemoryEntries(dataSize, mapper)) {
+ return false;
+ }
+ dataP = getDataP(_memoryEntries);
+ dataP->fPageCnt = 0;
+ switch (kIOMemoryDirectionMask & options) {
+ case kIODirectionOut:
+ dataP->fDMAAccess = kIODMAMapReadAccess;
+ break;
+ case kIODirectionIn:
+ dataP->fDMAAccess = kIODMAMapWriteAccess;
+ break;
+ case kIODirectionNone:
+ case kIODirectionOutIn:
+ default:
+ panic("bad dir for upl 0x%x\n", (int) options);
+ break;
+ }
+ // _wireCount++; // UPLs start out life wired
+
+ _length = count;
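+ // Count the pages spanned by [offset, offset + count), rounded out to page boundaries.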
+ _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
+
+ ioPLBlock iopl;
+ iopl.fIOPL = (upl_t) buffers;
+ upl_set_referenced(iopl.fIOPL, true);
+ upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
+
+ if (upl_get_size(iopl.fIOPL) < (count + offset)) {
+ panic("short external upl");
+ }
+
+ _highestPage = upl_get_highest_page(iopl.fIOPL);
+ DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
+
+ // Set the flag kIOPLOnDevice, which is conveniently equal to 1
+ iopl.fFlags = pageList->device | kIOPLExternUPL;
+ if (!pageList->device) {
+ // Pre-compute the offset into the UPL's page list
+ pageList = &pageList[atop_32(offset)];
+ offset &= PAGE_MASK;
+ }
+ iopl.fIOMDOffset = 0;
+ iopl.fMappedPage = 0;
+ iopl.fPageInfo = (vm_address_t) pageList;
+ iopl.fPageOffset = offset;
+ _memoryEntries->appendBytes(&iopl, sizeof(iopl));
+ } else {
+ // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
+ // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
+
+ // Initialize the memory descriptor
+ if (options & kIOMemoryAsReference) {
+#ifndef __LP64__
+ _rangesIsAllocated = false;
+#endif /* !__LP64__ */
+
+ // Hack assignment to get the buffer arg into _ranges.
+ // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
+ // work, C++ sigh.
+ // This also initialises the uio & physical ranges.
+ _ranges.v = (IOVirtualRange *) buffers;
+ } else {
+#ifndef __LP64__
+ _rangesIsAllocated = true;
+#endif /* !__LP64__ */
+ switch (type) {
+ case kIOMemoryTypeUIO:
+ _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
+ break;
+
+#ifndef __LP64__
+ case kIOMemoryTypeVirtual64:
+ case kIOMemoryTypePhysical64:
+ if (count == 1
+#ifndef __arm__
+ && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
+#endif
+ ) {
+ if (kIOMemoryTypeVirtual64 == type) {
+ type = kIOMemoryTypeVirtual;
+ } else {
+ type = kIOMemoryTypePhysical;
+ }
+ _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
+ _rangesIsAllocated = false;
+ _ranges.v = &_singleRange.v;
+ _singleRange.v.address = ((IOAddressRange *) buffers)->address;
+ _singleRange.v.length = ((IOAddressRange *) buffers)->length;
+ break;
+ }
+ _ranges.v64 = IONew(IOAddressRange, count);
+ if (!_ranges.v64) {
+ return false;
+ }
+ bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
+ break;
+#endif /* !__LP64__ */
+ case kIOMemoryTypeVirtual:
+ case kIOMemoryTypePhysical:
+ if (count == 1) {
+ _flags |= kIOMemoryAsReference;
+#ifndef __LP64__
+ _rangesIsAllocated = false;
+#endif /* !__LP64__ */
+ _ranges.v = &_singleRange.v;
+ } else {
+ _ranges.v = IONew(IOVirtualRange, count);
+ if (!_ranges.v) {
+ return false;
+ }
+ }
+ bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
+ break;
+ }
+ }
+ _rangesCount = count;
+
+ // Find starting address within the vector of ranges
+ Ranges vec = _ranges;
+ mach_vm_size_t totalLength = 0;
+ unsigned int ind, pages = 0;
+ for (ind = 0; ind < count; ind++) {
+ mach_vm_address_t addr;
+ mach_vm_address_t endAddr;
+ mach_vm_size_t len;
+
+ // addr & len are returned by this function
+ getAddrLenForInd(addr, len, type, vec, ind);
+ if (_task) {
+ mach_vm_size_t phys_size;
+ kern_return_t kret;
+ kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
+ if (KERN_SUCCESS != kret) {
+ break;
+ }
+ if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
+ break;
+ }
+ } else {
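+ // No task: these are physical (or remote) ranges. Count pages directly;
+ // unless the memory is remote, the highest page number must fit in 32 bits.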
+ if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
+ break;
+ }
+ if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
+ break;
+ }
+ if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
+ break;
+ }
+ }
+ if (os_add_overflow(totalLength, len, &totalLength)) {
+ break;
+ }
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
+ uint64_t highPage = atop_64(addr + len - 1);
+ if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
+ _highestPage = (ppnum_t) highPage;
+ DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
+ }
+ }
+ }
+ if ((ind < count)
+ || (totalLength != ((IOByteCount) totalLength))) {
+ return false; /* overflow */
+ }
+ _length = totalLength;
+ _pages = pages;
+
+ // Auto-prepare memory at creation time.
+ // Implied completion when descriptor is freed
+
+
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
+ _wireCount++; // Physical MDs are, by definition, wired
+ } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
+ ioGMDData *dataP;
+ unsigned dataSize;
+
+ if (_pages > atop_64(max_mem)) {
+ return false;
+ }
+
+ dataSize = computeDataSize(_pages, /* upls */ count * 2);
+ if (!initMemoryEntries(dataSize, mapper)) {
+ return false;
+ }
+ dataP = getDataP(_memoryEntries);
+ dataP->fPageCnt = _pages;
+
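+ // If no kernel VM tag has been set and this is not plain kernel_task
+ // memory (or it is pageable), derive one via IOMemoryTag(kernel_map);
+ // IOSurface-tagged memory also gets the matching user tag.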
+ if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
+ && (VM_KERN_MEMORY_NONE == _kernelTag)) {
+ _kernelTag = IOMemoryTag(kernel_map);
+ if (_kernelTag == gIOSurfaceTag) {
+ _userTag = VM_MEMORY_IOSURFACE;
+ }
+ }
+
+ if ((kIOMemoryPersistent & _flags) && !_memRef) {
+ IOReturn
+ err = memoryReferenceCreate(0, &_memRef);
+ if (kIOReturnSuccess != err) {
+ return false;
+ }
+ }
+
+ if ((_flags & kIOMemoryAutoPrepare)
+ && prepare() != kIOReturnSuccess) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+/*
+ * free
+ *
+ * Free resources.
+ */
+void
+IOGeneralMemoryDescriptor::free()
+{
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+
+ if (reserved && reserved->dp.memory) {
+ LOCK;
+ reserved->dp.memory = NULL;
+ UNLOCK;
+ }
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
+ ioGMDData * dataP;
+ if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
+ dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
+ dataP->fMappedBaseValid = dataP->fMappedBase = 0;
+ }
+ } else {
+ while (_wireCount) {
+ complete();
+ }
+ }
+
+ if (_memoryEntries) {
+ _memoryEntries.reset();
+ }
+
+ if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
+ if (kIOMemoryTypeUIO == type) {
+ uio_free((uio_t) _ranges.v);
+ }
+#ifndef __LP64__
+ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
+ IODelete(_ranges.v64, IOAddressRange, _rangesCount);
+ }
+#endif /* !__LP64__ */
+ else {
+ IODelete(_ranges.v, IOVirtualRange, _rangesCount);
+ }
+
+ _ranges.v = NULL;
+ }
+
+ if (reserved) {
+ cleanKernelReserved(reserved);
+ if (reserved->dp.devicePager) {
+ // memEntry holds a ref on the device pager which owns reserved
+ // (IOMemoryDescriptorReserved) so no reserved access after this point
+ device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
+ } else {
+ IODelete(reserved, IOMemoryDescriptorReserved, 1);
+ }
+ reserved = NULL;
+ }
+
+ if (_memRef) {
+ memoryReferenceRelease(_memRef);
+ }
+ if (_prepareLock) {
+ IOLockFree(_prepareLock);
+ }
+
+ super::free();
+}
+
+#ifndef __LP64__
+void
+IOGeneralMemoryDescriptor::unmapFromKernel()
+{
+ panic("IOGMD::unmapFromKernel deprecated");
+}
+
+void
+IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
+{
+ panic("IOGMD::mapIntoKernel deprecated");
+}
+#endif /* !__LP64__ */
+
+/*
+ * getDirection:
+ *
+ * Get the direction of the transfer.
+ */
+IODirection
+IOMemoryDescriptor::getDirection() const
+{
+#ifndef __LP64__
+ if (_direction) {
+ return _direction;
+ }
+#endif /* !__LP64__ */
+ return (IODirection) (_flags & kIOMemoryDirectionMask);
+}
+
+/*
+ * getLength:
+ *
+ * Get the length of the transfer (over all ranges).
+ */
+IOByteCount
+IOMemoryDescriptor::getLength() const
+{
+ return _length;
+}
+
+void
+IOMemoryDescriptor::setTag( IOOptionBits tag )
+{
+ _tag = tag;
+}
+
+IOOptionBits
+IOMemoryDescriptor::getTag( void )
+{
+ return _tag;
+}
+
+uint64_t
+IOMemoryDescriptor::getFlags(void)
+{
+ return _flags;
+}
+
+#ifndef __LP64__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+
+ // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
+IOPhysicalAddress
+IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
+{
+ addr64_t physAddr = 0;
+
+ if (prepare() == kIOReturnSuccess) {
+ physAddr = getPhysicalSegment64( offset, length );
+ complete();
+ }
+
+ return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
+}
+
+#pragma clang diagnostic pop
+
+#endif /* !__LP64__ */
+
+IOByteCount
+IOMemoryDescriptor::readBytes
+(IOByteCount offset, void *bytes, IOByteCount length)
+{
+ addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
+ IOByteCount endoffset;
+ IOByteCount remaining;
+
+
+ // Check that this entire I/O is within the available range
+ if ((offset > _length)
+ || os_add_overflow(length, offset, &endoffset)
+ || (endoffset > _length)) {
+ assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
+ return 0;
+ }
+ if (offset >= _length) {
+ return 0;
+ }
+
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return 0;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }
+
+ remaining = length = min(length, _length - offset);
+ while (remaining) { // (process another target segment?)
+ addr64_t srcAddr64;
+ IOByteCount srcLen;
+
+ srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
+ if (!srcAddr64) {
+ break;
+ }
+
+ // Clip segment length to remaining
+ if (srcLen > remaining) {
+ srcLen = remaining;
+ }
+
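+ // copypv takes an unsigned int length; clip to the largest page-aligned count that fits.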
+ if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
+ srcLen = (UINT_MAX - PAGE_SIZE + 1);
+ }
+ copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
+ cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
+
+ dstAddr += srcLen;
+ offset += srcLen;
+ remaining -= srcLen;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }
+
+ assert(!remaining);
+
+ return length - remaining;
+}
+
+IOByteCount
+IOMemoryDescriptor::writeBytes
+(IOByteCount inoffset, const void *bytes, IOByteCount length)
+{
+ addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
+ IOByteCount remaining;
+ IOByteCount endoffset;
+ IOByteCount offset = inoffset;
+
+ assert( !(kIOMemoryPreparedReadOnly & _flags));
+
+ // Check that this entire I/O is within the available range
+ if ((offset > _length)
+ || os_add_overflow(length, offset, &endoffset)
+ || (endoffset > _length)) {
+ assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
+ return 0;
+ }
+ if (kIOMemoryPreparedReadOnly & _flags) {
+ return 0;
+ }
+ if (offset >= _length) {
+ return 0;
+ }
+
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return 0;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }
+
+ remaining = length = min(length, _length - offset);
+ while (remaining) { // (process another target segment?)
+ addr64_t dstAddr64;
+ IOByteCount dstLen;
+
+ dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
+ if (!dstAddr64) {
+ break;
+ }
+
+ // Clip segment length to remaining
+ if (dstLen > remaining) {
+ dstLen = remaining;
+ }
+
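+ // copypv takes an unsigned int length; clip to the largest page-aligned count that fits.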
+ if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
+ dstLen = (UINT_MAX - PAGE_SIZE + 1);
+ }
+ if (!srcAddr) {
+ bzero_phys(dstAddr64, (unsigned int) dstLen);
+ } else {
+ copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
+ cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
+ srcAddr += dstLen;
+ }
+ offset += dstLen;
+ remaining -= dstLen;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }
+
+ assert(!remaining);
+
+#if defined(__x86_64__)
+ // copypv does not cppvFsnk on intel
+#else
+ if (!srcAddr) {
+ performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
+ }
+#endif
+
+ return length - remaining;
+}
+
+#ifndef __LP64__
+void
+IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
+{
+ panic("IOGMD::setPosition deprecated");
+}
+#endif /* !__LP64__ */
+
+static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
+static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
+
+uint64_t
+IOGeneralMemoryDescriptor::getPreparationID( void )
+{
+ ioGMDData *dataP;
+
+ if (!_wireCount) {
+ return kIOPreparationIDUnprepared;
+ }
+
+ if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
+ || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
+ IOMemoryDescriptor::setPreparationID();
+ return IOMemoryDescriptor::getPreparationID();
+ }
+
+ if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
+ return kIOPreparationIDUnprepared;
+ }
+
+ if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
+ SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
+ OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
+ }
+ return dataP->fPreparationID;
+}
+
+void
+IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
+{
+ if (reserved->creator) {
+ task_deallocate(reserved->creator);
+ reserved->creator = NULL;
+ }
+}
+
+IOMemoryDescriptorReserved *
+IOMemoryDescriptor::getKernelReserved( void )
+{
+ if (!reserved) {
+ reserved = IONewZero(IOMemoryDescriptorReserved, 1);
+ }
+ return reserved;
+}
+
+void
+IOMemoryDescriptor::setPreparationID( void )
+{
+ if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
+ SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
+ OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
+ }
+}
+
+uint64_t
+IOMemoryDescriptor::getPreparationID( void )
+{
+ if (reserved) {
+ return reserved->preparationID;
+ } else {
+ return kIOPreparationIDUnsupported;
+ }
+}
+
+void
+IOMemoryDescriptor::setDescriptorID( void )
+{
+ if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
+ SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
+ OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
+ }
+}
+
+uint64_t
+IOMemoryDescriptor::getDescriptorID( void )
+{
+ setDescriptorID();
+
+ if (reserved) {
+ return reserved->descriptorID;
+ } else {
+ return kIODescriptorIDInvalid;
+ }
+}
+
+IOReturn
+IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
+{
+ if (!kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
+ return kIOReturnSuccess;
+ }
+
+ assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
+ if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
+ return kIOReturnBadArgument;
+ }
+
+ uint64_t descriptorID = getDescriptorID();
+ assert(descriptorID != kIODescriptorIDInvalid);
+ if (getDescriptorID() == kIODescriptorIDInvalid) {
+ return kIOReturnBadArgument;
+ }
+
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
+
+#if __LP64__
+ static const uint8_t num_segments_page = 8;
+#else
+ static const uint8_t num_segments_page = 4;
+#endif
+ static const uint8_t num_segments_long = 2;
+
+ IOPhysicalAddress segments_page[num_segments_page];
+ IOPhysicalRange segments_long[num_segments_long];
+ memset(segments_page, UINT32_MAX, sizeof(segments_page));
+ memset(segments_long, 0, sizeof(segments_long));
+
+ uint8_t segment_page_idx = 0;
+ uint8_t segment_long_idx = 0;
+
+ IOPhysicalRange physical_segment;
+ for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
+ physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
+
+ if (physical_segment.length == 0) {
+ break;
+ }
+
+ /**
+ * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages. To optimize for trace
+ * buffer memory, pack segment events according to the following.
+ *
+ * Mappings must be emitted in ascending order starting from offset 0. Mappings can be associated with the previous
+ * IOMDPA_MAPPED event emitted by the current thread_id.
+ *
+ * IOMDPA_SEGMENTS_PAGE = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
+ * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
+ * - unmapped pages will have a ppn of MAX_INT_32
+ * IOMDPA_SEGMENTS_LONG = up to 2 virtually contiguous mappings of variable length
+ * - address_0, length_0, address_1, length_1
+ * - unmapped pages will have an address of 0
+ *
+ * During each iteration do the following, depending on the length of the mapping:
+ * 1. add the current segment to the appropriate queue of pending segments
+ * 2. check whether we are operating on the same type of segment (PAGE/LONG) as the previous pass
+ * 2a. if not, emit and reset all events in the other queue
+ * 3. check whether we have filled up the current queue of pending events
+ * 3a. if so, emit and reset all events in the pending queue
+ * 4. after completing all iterations, emit any events remaining in the current queue
+ */
+
+ bool emit_page = false;
+ bool emit_long = false;
+ if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
+ segments_page[segment_page_idx] = physical_segment.address;
+ segment_page_idx++;
+
+ emit_long = segment_long_idx != 0;
+ emit_page = segment_page_idx == num_segments_page;
+
+ if (os_unlikely(emit_long)) {
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
+ segments_long[0].address, segments_long[0].length,
+ segments_long[1].address, segments_long[1].length);
+ }
+
+ if (os_unlikely(emit_page)) {
+#if __LP64__
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
+ ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
+ ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
+ ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
+ ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
+#else
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
+ (ppnum_t) atop_32(segments_page[0]),
+ (ppnum_t) atop_32(segments_page[1]),
+ (ppnum_t) atop_32(segments_page[2]),
+ (ppnum_t) atop_32(segments_page[3]));
+#endif
+ }
+ } else {
+ segments_long[segment_long_idx] = physical_segment;
+ segment_long_idx++;
+
+ emit_page = segment_page_idx != 0;
+ emit_long = segment_long_idx == num_segments_long;
+
+ if (os_unlikely(emit_page)) {
+#if __LP64__
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
+ ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
+ ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
+ ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
+ ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
+#else
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
+ (ppnum_t) atop_32(segments_page[0]),
+ (ppnum_t) atop_32(segments_page[1]),
+ (ppnum_t) atop_32(segments_page[2]),
+ (ppnum_t) atop_32(segments_page[3]));
+#endif
+ }
+
+ if (emit_long) {
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
+ segments_long[0].address, segments_long[0].length,
+ segments_long[1].address, segments_long[1].length);
+ }
+ }
+
+ if (os_unlikely(emit_page)) {
+ memset(segments_page, UINT32_MAX, sizeof(segments_page));
+ segment_page_idx = 0;
+ }
+
+ if (os_unlikely(emit_long)) {
+ memset(segments_long, 0, sizeof(segments_long));
+ segment_long_idx = 0;
+ }
+ }
+
+ if (segment_page_idx != 0) {
+ assert(segment_long_idx == 0);
+#if __LP64__
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
+ ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
+ ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
+ ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
+ ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
+#else
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
+ (ppnum_t) atop_32(segments_page[0]),
+ (ppnum_t) atop_32(segments_page[1]),
+ (ppnum_t) atop_32(segments_page[2]),
+ (ppnum_t) atop_32(segments_page[3]));
+#endif
+ } else if (segment_long_idx != 0) {
+ assert(segment_page_idx == 0);
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
+ segments_long[0].address, segments_long[0].length,
+ segments_long[1].address, segments_long[1].length);
+ }
+
+ return kIOReturnSuccess;
+}
+
+void
+IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
+{
+ _kernelTag = (vm_tag_t) kernelTag;
+ _userTag = (vm_tag_t) userTag;
+}
+
+uint32_t
+IOMemoryDescriptor::getVMTag(vm_map_t map)
+{
+ if (vm_kernel_map_is_kernel(map)) {
+ if (VM_KERN_MEMORY_NONE != _kernelTag) {
+ return (uint32_t) _kernelTag;
+ }
+ } else {
+ if (VM_KERN_MEMORY_NONE != _userTag) {
+ return (uint32_t) _userTag;
+ }
+ }
+ return IOMemoryTag(map);
+}
+
+IOReturn
+IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
+{
+ IOReturn err = kIOReturnSuccess;
+ DMACommandOps params;
+ IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
+ ioGMDData *dataP;
+
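+ // Separate the parameter bits (outside the operation mask) from the operation code itself.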
+ params = (op & ~kIOMDDMACommandOperationMask & op);
+ op &= kIOMDDMACommandOperationMask;
+
+ if (kIOMDDMAMap == op) {
+ if (dataSize < sizeof(IOMDDMAMapArgs)) {
+ return kIOReturnUnderrun;
+ }
+
+ IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
+
+ if (!_memoryEntries
+ && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
+ return kIOReturnNoMemory;
+ }
+
+ if (_memoryEntries && data->fMapper) {
+ bool remap, keepMap;
+ dataP = getDataP(_memoryEntries);
+
+ if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
+ dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
+ }
+ if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
+ dataP->fDMAMapAlignment = data->fMapSpec.alignment;
+ }
+
+ keepMap = (data->fMapper == gIOSystemMapper);
+ keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
+
+ if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
+ IOLockLock(_prepareLock);
+ }
+
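+ // Re-map if the existing allocation can't be shared, or if it no longer
+ // satisfies the (possibly tightened) address-width or alignment constraints.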
+ remap = (!keepMap);
+ remap |= (dataP->fDMAMapNumAddressBits < 64)
+ && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
+ remap |= (dataP->fDMAMapAlignment > page_size);
+
+ if (remap || !dataP->fMappedBaseValid) {
+ err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
+ if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
+ dataP->fMappedBase = data->fAlloc;
+ dataP->fMappedBaseValid = true;
+ dataP->fMappedLength = data->fAllocLength;
+ data->fAllocLength = 0; // IOMD owns the alloc now
+ }
+ } else {
+ data->fAlloc = dataP->fMappedBase;
+ data->fAllocLength = 0; // give out IOMD map
+ md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
+ }
+
+ if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
+ IOLockUnlock(_prepareLock);
+ }
+ }
+ return err;
+ }
+ if (kIOMDDMAUnmap == op) {
+ if (dataSize < sizeof(IOMDDMAMapArgs)) {
+ return kIOReturnUnderrun;
+ }
+ IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
+
+ err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
+
+ return kIOReturnSuccess;
+ }
+
+ if (kIOMDAddDMAMapSpec == op) {
+ if (dataSize < sizeof(IODMAMapSpecification)) {
+ return kIOReturnUnderrun;
+ }
+
+ IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
+
+ if (!_memoryEntries
+ && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
+ return kIOReturnNoMemory;
+ }
+
+ if (_memoryEntries) {
+ dataP = getDataP(_memoryEntries);
+ if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
+ dataP->fDMAMapNumAddressBits = data->numAddressBits;
+ }
+ if (data->alignment > dataP->fDMAMapAlignment) {
+ dataP->fDMAMapAlignment = data->alignment;
+ }
+ }
+ return kIOReturnSuccess;
+ }
+
+ if (kIOMDGetCharacteristics == op) {
+ if (dataSize < sizeof(IOMDDMACharacteristics)) {
+ return kIOReturnUnderrun;
+ }
+
+ IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
+ data->fLength = _length;
+ data->fSGCount = _rangesCount;
+ data->fPages = _pages;
+ data->fDirection = getDirection();
+ if (!_wireCount) {
+ data->fIsPrepared = false;
+ } else {
+ data->fIsPrepared = true;
+ data->fHighestPage = _highestPage;
+ if (_memoryEntries) {
+ dataP = getDataP(_memoryEntries);
+ ioPLBlock *ioplList = getIOPLList(dataP);
+ UInt count = getNumIOPL(_memoryEntries, dataP);
+ if (count == 1) {
+ data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
+ }
+ }
+ }
+
+ return kIOReturnSuccess;
+ } else if (kIOMDDMAActive == op) {
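+ // Track active DMA references on this descriptor: a non-zero params takes
+ // a reference (clearing the map name on the first one); zero drops one.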
+ if (params) {
+ int16_t prior;
+ prior = OSAddAtomic16(1, &md->_dmaReferences);
+ if (!prior) {
+ md->_mapName = NULL;
+ }
+ } else {
+ if (md->_dmaReferences) {
+ OSAddAtomic16(-1, &md->_dmaReferences);
+ } else {
+ panic("_dmaReferences underflow");
+ }
+ }
+ } else if (kIOMDWalkSegments != op) {
+ return kIOReturnBadArgument;
+ }
+
+ // Get the next segment
+ struct InternalState {
+ IOMDDMAWalkSegmentArgs fIO;
+ mach_vm_size_t fOffset2Index;
+ mach_vm_size_t fNextOffset;
+ UInt fIndex;
+ } *isP;
+
+ // Find the next segment
+ if (dataSize < sizeof(*isP)) {
+ return kIOReturnUnderrun;
+ }
+
+ isP = (InternalState *) vData;
+ uint64_t offset = isP->fIO.fOffset;
+ uint8_t mapped = isP->fIO.fMapped;
+ uint64_t mappedBase;
+
+ if (mapped && (kIOMemoryRemote & _flags)) {
+ return kIOReturnNotAttached;
+ }
+
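+ // A mapped (DMA) address was requested but no valid system-mapper
+ // allocation exists yet; create one lazily covering the whole descriptor.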
+ if (IOMapper::gSystem && mapped
+ && (!(kIOMemoryHostOnly & _flags))
+ && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
+// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
+ if (!_memoryEntries
+ && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
+ return kIOReturnNoMemory;
+ }
+
+ dataP = getDataP(_memoryEntries);
+ if (dataP->fMapper) {
+ IODMAMapSpecification mapSpec;
+ bzero(&mapSpec, sizeof(mapSpec));
+ mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
+ mapSpec.alignment = dataP->fDMAMapAlignment;
+ err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
+ if (kIOReturnSuccess != err) {
+ return err;
+ }
+ dataP->fMappedBaseValid = true;
+ }
+ }
+
+ if (mapped) {
+ if (IOMapper::gSystem
+ && (!(kIOMemoryHostOnly & _flags))
+ && _memoryEntries
+ && (dataP = getDataP(_memoryEntries))
+ && dataP->fMappedBaseValid) {
+ mappedBase = dataP->fMappedBase;
+ } else {
+ mapped = 0;
+ }
+ }
+
+ if (offset >= _length) {
+ return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
+ }
+
+ // Validate the previous offset
+ UInt ind;
+ mach_vm_size_t off2Ind = isP->fOffset2Index;
+ if (!params
+ && offset
+ && (offset == isP->fNextOffset || off2Ind <= offset)) {
+ ind = isP->fIndex;
+ } else {
+ ind = off2Ind = 0; // Start from beginning
+ }
+ mach_vm_size_t length;
+ UInt64 address;
+
+ if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
+ // Physical address based memory descriptor
+ const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
+
+ // Find the range after the one that contains the offset
+ mach_vm_size_t len;
+ for (len = 0; off2Ind <= offset; ind++) {
+ len = physP[ind].length;
+ off2Ind += len;
+ }
+
+ // Calculate length within range and starting address
+ length = off2Ind - offset;
+ address = physP[ind - 1].address + len - length;
+
+ if (mapped) {
+ address = mappedBase + offset;
+ } else {
+ // see how far we can coalesce ranges
+ while (ind < _rangesCount && address + length == physP[ind].address) {
+ len = physP[ind].length;
+ length += len;
+ off2Ind += len;
+ ind++;
+ }
+ }
+
+ // correct contiguous check overshoot
+ ind--;
+ off2Ind -= len;
+ }
+#ifndef __LP64__
+ else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
+ // Physical address based memory descriptor
+ const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
+
+ // Find the range after the one that contains the offset
+ mach_vm_size_t len;
+ for (len = 0; off2Ind <= offset; ind++) {
+ len = physP[ind].length;
+ off2Ind += len;
+ }
+
+ // Calculate length within range and starting address
+ length = off2Ind - offset;
+ address = physP[ind - 1].address + len - length;
+
+ if (mapped) {
+ address = mappedBase + offset;
+ } else {
+ // see how far we can coalesce ranges
+ while (ind < _rangesCount && address + length == physP[ind].address) {
+ len = physP[ind].length;
+ length += len;
+ off2Ind += len;
+ ind++;
+ }
+ }
+ // correct contiguous check overshoot
+ ind--;
+ off2Ind -= len;
+ }
+#endif /* !__LP64__ */
+ else {
+ do {
+ if (!_wireCount) {
+ panic("IOGMD: not wired for the IODMACommand");
+ }
+
+ assert(_memoryEntries);
+
+ dataP = getDataP(_memoryEntries);
+ const ioPLBlock *ioplList = getIOPLList(dataP);
+ UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
+ upl_page_info_t *pageList = getPageList(dataP);
+
+ assert(numIOPLs > 0);
+
+ // Scan through iopl info blocks looking for block containing offset
+ while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
+ ind++;
+ }
+
+ // Go back to actual range as search goes past it
+ ioPLBlock ioplInfo = ioplList[ind - 1];
+ off2Ind = ioplInfo.fIOMDOffset;
+
+ if (ind < numIOPLs) {
+ length = ioplList[ind].fIOMDOffset;
+ } else {
+ length = _length;
+ }
+ length -= offset; // Remainder within iopl
+
+ // Subtract offset till this iopl in total list
+ offset -= off2Ind;
+
+ // If a mapped address is requested and this is a pre-mapped IOPL
+ // then we just need to compute an offset relative to the mapped base.
+ if (mapped) {
+ offset += (ioplInfo.fPageOffset & PAGE_MASK);
+ address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
+ continue; // Done; leave the do/while(false) now
+ }
+
+ // The offset is rebased into the current iopl.
+ // Now add the iopl 1st page offset.
+ offset += ioplInfo.fPageOffset;
+
+ // For external UPLs the fPageInfo field points directly to
+ // the upl's upl_page_info_t array.
+ if (ioplInfo.fFlags & kIOPLExternUPL) {
+ pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
+ } else {
+ pageList = &pageList[ioplInfo.fPageInfo];
+ }
+
+ // Check for direct device non-paged memory
+ if (ioplInfo.fFlags & kIOPLOnDevice) {
+ address = ptoa_64(pageList->phys_addr) + offset;
+ continue; // Done; leave the do/while(false) now
+ }
+
+ // Now we need to compute the index into the pageList
+ UInt pageInd = atop_32(offset);
+ offset &= PAGE_MASK;
+
+ // Compute the starting address of this segment
+ IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
+ if (!pageAddr) {
+ panic("!pageList phys_addr");
+ }
+
+ address = ptoa_64(pageAddr) + offset;
+
+ // length is currently set to the length of the remainder of the iopl.
+ // We need to check that the remainder of the iopl is contiguous.
+ // This is indicated by pageList[ind].phys_addr being sequential.
+ IOByteCount contigLength = PAGE_SIZE - offset;
+ while (contigLength < length
+ && ++pageAddr == pageList[++pageInd].phys_addr) {
+ contigLength += PAGE_SIZE;
+ }
+
+ if (contigLength < length) {
+ length = contigLength;
+ }
+