+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+IOMemoryDescriptor *
+IOMemoryDescriptor::withAddress(void * address,
+ IOByteCount length,
+ IODirection direction)
+{
+ return IOMemoryDescriptor::
+ withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
+}
+
+#ifndef __LP64__
+IOMemoryDescriptor *
+IOMemoryDescriptor::withAddress(IOVirtualAddress address,
+ IOByteCount length,
+ IODirection direction,
+ task_t task)
+{
+ IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
+ if (that) {
+ if (that->initWithAddress(address, length, direction, task)) {
+ return that;
+ }
+
+ that->release();
+ }
+ return NULL;
+}
+#endif /* !__LP64__ */
+
+IOMemoryDescriptor *
+IOMemoryDescriptor::withPhysicalAddress(
+ IOPhysicalAddress address,
+ IOByteCount length,
+ IODirection direction )
+{
+ return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
+}
+
+#ifndef __LP64__
+IOMemoryDescriptor *
+IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
+ UInt32 withCount,
+ IODirection direction,
+ task_t task,
+ bool asReference)
+{
+ IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
+ if (that) {
+ if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
+ return that;
+ }
+
+ that->release();
+ }
+ return NULL;
+}
+#endif /* !__LP64__ */
+
+IOMemoryDescriptor *
+IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
+ mach_vm_size_t length,
+ IOOptionBits options,
+ task_t task)
+{
+ IOAddressRange range = { address, length };
+ return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
+}
+
+IOMemoryDescriptor *
+IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
+ UInt32 rangeCount,
+ IOOptionBits options,
+ task_t task)
+{
+ IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
+ if (that) {
+ if (task) {
+ options |= kIOMemoryTypeVirtual64;
+ } else {
+ options |= kIOMemoryTypePhysical64;
+ }
+
+ if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
+ return that;
+ }
+
+ that->release();
+ }
+
+ return NULL;
+}
+
+
+/*
+ * withOptions:
+ *
+ * Create a new IOMemoryDescriptor. The buffer is made up of several
+ * virtual address ranges, from a given task.
+ *
+ * Passing the ranges as a reference will avoid an extra allocation.
+ */
+IOMemoryDescriptor *
+IOMemoryDescriptor::withOptions(void * buffers,
+ UInt32 count,
+ UInt32 offset,
+ task_t task,
+ IOOptionBits opts,
+ IOMapper * mapper)
+{
+ IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
+
+ if (self
+ && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
+ self->release();
+ return NULL;
+ }
+
+ return self;
+}
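+
+/*
+ * Illustrative sketch only (not part of the build): one way a driver might
+ * use the factory methods above. The buffer and length names (buf, bufLen)
+ * are hypothetical; the calls shown are the IOMemoryDescriptor factories
+ * defined in this file plus the usual prepare()/complete()/release()
+ * lifecycle.
+ *
+ *   IOMemoryDescriptor * md =
+ *       IOMemoryDescriptor::withAddressRange((mach_vm_address_t) buf,
+ *                                            bufLen,
+ *                                            kIODirectionOut,
+ *                                            kernel_task);
+ *   if (md) {
+ *       if (kIOReturnSuccess == md->prepare()) {
+ *           // ... hand the descriptor to an IODMACommand or similar ...
+ *           md->complete();
+ *       }
+ *       md->release();
+ *   }
+ */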
+
+bool
+IOMemoryDescriptor::initWithOptions(void * buffers,
+ UInt32 count,
+ UInt32 offset,
+ task_t task,
+ IOOptionBits options,
+ IOMapper * mapper)
+{
+ return false;
+}
+
+#ifndef __LP64__
+IOMemoryDescriptor *
+IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
+ UInt32 withCount,
+ IODirection direction,
+ bool asReference)
+{
+ IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
+ if (that) {
+ if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
+ return that;
+ }
+
+ that->release();
+ }
+ return NULL;
+}
+
+IOMemoryDescriptor *
+IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
+ IOByteCount offset,
+ IOByteCount length,
+ IODirection direction)
+{
+ return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
+}
+#endif /* !__LP64__ */
+
+IOMemoryDescriptor *
+IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
+{
+ IOGeneralMemoryDescriptor *origGenMD =
+ OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
+
+ if (origGenMD) {
+ return IOGeneralMemoryDescriptor::
+ withPersistentMemoryDescriptor(origGenMD);
+ } else {
+ return NULL;
+ }
+}
+
+IOMemoryDescriptor *
+IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
+{
+ IOMemoryReference * memRef;
+
+ if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
+ return NULL;
+ }
+
+ if (memRef == originalMD->_memRef) {
+ originalMD->retain(); // Add a new reference to ourselves
+ originalMD->memoryReferenceRelease(memRef);
+ return originalMD;
+ }
+
+ IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
+ IOMDPersistentInitData initData = { originalMD, memRef };
+
+ if (self
+ && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
+ self->release();
+ self = NULL;
+ }
+ return self;
+}
+
+#ifndef __LP64__
+bool
+IOGeneralMemoryDescriptor::initWithAddress(void * address,
+ IOByteCount withLength,
+ IODirection withDirection)
+{
+ _singleRange.v.address = (vm_offset_t) address;
+ _singleRange.v.length = withLength;
+
+ return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
+}
+
+bool
+IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
+ IOByteCount withLength,
+ IODirection withDirection,
+ task_t withTask)
+{
+ _singleRange.v.address = address;
+ _singleRange.v.length = withLength;
+
+ return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
+}
+
+bool
+IOGeneralMemoryDescriptor::initWithPhysicalAddress(
+ IOPhysicalAddress address,
+ IOByteCount withLength,
+ IODirection withDirection )
+{
+ _singleRange.p.address = address;
+ _singleRange.p.length = withLength;
+
+ return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
+}
+
+bool
+IOGeneralMemoryDescriptor::initWithPhysicalRanges(
+ IOPhysicalRange * ranges,
+ UInt32 count,
+ IODirection direction,
+ bool reference)
+{
+ IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
+
+ if (reference) {
+ mdOpts |= kIOMemoryAsReference;
+ }
+
+ return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
+}
+
+bool
+IOGeneralMemoryDescriptor::initWithRanges(
+ IOVirtualRange * ranges,
+ UInt32 count,
+ IODirection direction,
+ task_t task,
+ bool reference)
+{
+ IOOptionBits mdOpts = direction;
+
+ if (reference) {
+ mdOpts |= kIOMemoryAsReference;
+ }
+
+ if (task) {
+ mdOpts |= kIOMemoryTypeVirtual;
+
+ // Auto-prepare if this is a kernel memory descriptor, as very few
+ // clients bother to prepare() kernel memory and preparation was
+ // never enforced for it anyway.
+ if (task == kernel_task) {
+ mdOpts |= kIOMemoryAutoPrepare;
+ }
+ } else {
+ mdOpts |= kIOMemoryTypePhysical;
+ }
+
+ return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
+}
+#endif /* !__LP64__ */
+
+/*
+ * initWithOptions:
+ *
+ * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
+ * address ranges from a given task, several physical ranges, a UPL from the
+ * UBC system, or a uio (which may be 64-bit) from the BSD subsystem.
+ *
+ * Passing the ranges as a reference will avoid an extra allocation.
+ *
+ * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
+ * existing instance -- note this behavior is not commonly supported in other
+ * I/O Kit classes, although it is supported here.
+ */
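+
+/*
+ * Illustrative sketch only (not part of the build): re-initialising an
+ * IOGeneralMemoryDescriptor with initWithOptions(), as described above.
+ * The range values (addr0/len0, addr1/len1) and task are hypothetical.
+ *
+ *   IOAddressRange ranges[2] = {{ addr0, len0 }, { addr1, len1 }};
+ *   IOGeneralMemoryDescriptor * md = new IOGeneralMemoryDescriptor;
+ *   if (md && md->initWithOptions(&ranges[0], 2, 0, task,
+ *               kIOMemoryTypeVirtual64 | kIODirectionInOut, NULL)) {
+ *       // ... use md ...
+ *       // A later initWithOptions() call on the same object retargets it:
+ *       // outstanding prepare()s are completed and copied ranges freed
+ *       // before the new buffers are adopted.
+ *   }
+ */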
+
+bool
+IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
+ UInt32 count,
+ UInt32 offset,
+ task_t task,
+ IOOptionBits options,
+ IOMapper * mapper)
+{
+ IOOptionBits type = options & kIOMemoryTypeMask;
+
+#ifndef __LP64__
+ if (task
+ && (kIOMemoryTypeVirtual == type)
+ && vm_map_is_64bit(get_task_map(task))
+ && ((IOVirtualRange *) buffers)->address) {
+ OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
+ return false;
+ }
+#endif /* !__LP64__ */
+
+ // Grab the original MD's configuration data to initialise the
+ // arguments to this function.
+ if (kIOMemoryTypePersistentMD == type) {
+ IOMDPersistentInitData *initData = (typeof(initData))buffers;
+ const IOGeneralMemoryDescriptor *orig = initData->fMD;
+ ioGMDData *dataP = getDataP(orig->_memoryEntries);
+
+ // Only accept persistent memory descriptors with valid dataP data.
+ assert(orig->_rangesCount == 1);
+ if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
+ return false;
+ }
+
+ _memRef = initData->fMemRef; // Grab the new named entry
+ options = orig->_flags & ~kIOMemoryAsReference;
+ type = options & kIOMemoryTypeMask;
+ buffers = orig->_ranges.v;
+ count = orig->_rangesCount;
+
+ // Now grab the original task and whatever mapper was previously used
+ task = orig->_task;
+ mapper = dataP->fMapper;
+
+ // We are ready to go through the original initialisation now
+ }
+
+ switch (type) {
+ case kIOMemoryTypeUIO:
+ case kIOMemoryTypeVirtual:
+#ifndef __LP64__
+ case kIOMemoryTypeVirtual64:
+#endif /* !__LP64__ */
+ assert(task);
+ if (!task) {
+ return false;
+ }
+ break;
+
+ case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
+#ifndef __LP64__
+ case kIOMemoryTypePhysical64:
+#endif /* !__LP64__ */
+ case kIOMemoryTypeUPL:
+ assert(!task);
+ break;
+ default:
+ return false; /* bad argument */
+ }
+
+ assert(buffers);
+ assert(count);
+
+ /*
+ * We can check the _initialized instance variable before having ever set
+ * it to an initial value because I/O Kit guarantees that all our instance
+ * variables are zeroed on an object's allocation.
+ */
+
+ if (_initialized) {
+ /*
+ * An existing memory descriptor is being retargeted to point to
+ * somewhere else. Clean up our present state.
+ */
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+ if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
+ while (_wireCount) {
+ complete();
+ }
+ }
+ if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
+ if (kIOMemoryTypeUIO == type) {
+ uio_free((uio_t) _ranges.v);
+ }
+#ifndef __LP64__
+ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
+ IODelete(_ranges.v64, IOAddressRange, _rangesCount);
+ }
+#endif /* !__LP64__ */
+ else {
+ IODelete(_ranges.v, IOVirtualRange, _rangesCount);
+ }
+ }
+
+ options |= (kIOMemoryRedirected & _flags);
+ if (!(kIOMemoryRedirected & options)) {
+ if (_memRef) {
+ memoryReferenceRelease(_memRef);
+ _memRef = NULL;
+ }
+ if (_mappings) {
+ _mappings->flushCollection();
+ }
+ }
+ } else {
+ if (!super::init()) {
+ return false;
+ }
+ _initialized = true;
+ }
+
+ // Grab the appropriate mapper
+ if (kIOMemoryHostOrRemote & options) {
+ options |= kIOMemoryMapperNone;
+ }
+ if (kIOMemoryMapperNone & options) {
+ mapper = NULL; // No Mapper
+ } else if (mapper == kIOMapperSystem) {
+ IOMapper::checkForSystemMapper();
+ gIOSystemMapper = mapper = IOMapper::gSystem;
+ }
+
+ // Remove the dynamic internal use flags from the initial setting
+ options &= ~(kIOMemoryPreparedReadOnly);
+ _flags = options;
+ _task = task;
+
+#ifndef __LP64__
+ _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
+#endif /* !__LP64__ */
+
+ _dmaReferences = 0;
+ __iomd_reservedA = 0;
+ __iomd_reservedB = 0;
+ _highestPage = 0;
+
+ if (kIOMemoryThreadSafe & options) {
+ if (!_prepareLock) {
+ _prepareLock = IOLockAlloc();
+ }
+ } else if (_prepareLock) {
+ IOLockFree(_prepareLock);
+ _prepareLock = NULL;
+ }
+
+ if (kIOMemoryTypeUPL == type) {
+ ioGMDData *dataP;
+ unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
+
+ if (!initMemoryEntries(dataSize, mapper)) {
+ return false;
+ }
+ dataP = getDataP(_memoryEntries);
+ dataP->fPageCnt = 0;
+ switch (kIOMemoryDirectionMask & options) {
+ case kIODirectionOut:
+ dataP->fDMAAccess = kIODMAMapReadAccess;
+ break;
+ case kIODirectionIn:
+ dataP->fDMAAccess = kIODMAMapWriteAccess;
+ break;
+ case kIODirectionNone:
+ case kIODirectionOutIn:
+ default:
+ panic("bad dir for upl 0x%x\n", (int) options);
+ break;
+ }
+ // _wireCount++; // UPLs start out life wired
+
+ _length = count;
+ _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
+
+ ioPLBlock iopl;
+ iopl.fIOPL = (upl_t) buffers;
+ upl_set_referenced(iopl.fIOPL, true);
+ upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
+
+ if (upl_get_size(iopl.fIOPL) < (count + offset)) {
+ panic("short external upl");
+ }
+
+ _highestPage = upl_get_highest_page(iopl.fIOPL);
+
+ // Set the flag kIOPLOnDevice, conveniently equal to 1
+ iopl.fFlags = pageList->device | kIOPLExternUPL;
+ if (!pageList->device) {
+ // Pre-compute the offset into the UPL's page list
+ pageList = &pageList[atop_32(offset)];
+ offset &= PAGE_MASK;
+ }
+ iopl.fIOMDOffset = 0;
+ iopl.fMappedPage = 0;
+ iopl.fPageInfo = (vm_address_t) pageList;
+ iopl.fPageOffset = offset;
+ _memoryEntries->appendBytes(&iopl, sizeof(iopl));
+ } else {
+ // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
+ // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
+
+ // Initialize the memory descriptor
+ if (options & kIOMemoryAsReference) {
+#ifndef __LP64__
+ _rangesIsAllocated = false;
+#endif /* !__LP64__ */
+
+ // Hack assignment to get the buffer arg into _ranges.
+ // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
+ // work, C++ sigh.
+ // This also initialises the uio & physical ranges.
+ _ranges.v = (IOVirtualRange *) buffers;
+ } else {
+#ifndef __LP64__
+ _rangesIsAllocated = true;
+#endif /* !__LP64__ */
+ switch (type) {
+ case kIOMemoryTypeUIO:
+ _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
+ break;
+
+#ifndef __LP64__
+ case kIOMemoryTypeVirtual64:
+ case kIOMemoryTypePhysical64:
+ if (count == 1
+#ifndef __arm__
+ && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
+#endif
+ ) {
+ if (kIOMemoryTypeVirtual64 == type) {
+ type = kIOMemoryTypeVirtual;
+ } else {
+ type = kIOMemoryTypePhysical;
+ }
+ _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
+ _rangesIsAllocated = false;
+ _ranges.v = &_singleRange.v;
+ _singleRange.v.address = ((IOAddressRange *) buffers)->address;
+ _singleRange.v.length = ((IOAddressRange *) buffers)->length;
+ break;
+ }
+ _ranges.v64 = IONew(IOAddressRange, count);
+ if (!_ranges.v64) {
+ return false;
+ }
+ bcopy(buffers, _ranges.v64, count * sizeof(IOAddressRange));
+ break;
+#endif /* !__LP64__ */
+ case kIOMemoryTypeVirtual:
+ case kIOMemoryTypePhysical:
+ if (count == 1) {
+ _flags |= kIOMemoryAsReference;
+#ifndef __LP64__
+ _rangesIsAllocated = false;
+#endif /* !__LP64__ */
+ _ranges.v = &_singleRange.v;
+ } else {
+ _ranges.v = IONew(IOVirtualRange, count);
+ if (!_ranges.v) {
+ return false;
+ }
+ }
+ bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
+ break;
+ }
+ }
+ _rangesCount = count;
+
+ // Walk the vector of ranges, totalling length, page count and highest page
+ Ranges vec = _ranges;
+ mach_vm_size_t totalLength = 0;
+ unsigned int ind, pages = 0;
+ for (ind = 0; ind < count; ind++) {
+ mach_vm_address_t addr;
+ mach_vm_address_t endAddr;
+ mach_vm_size_t len;
+
+ // addr & len are returned by this function
+ getAddrLenForInd(addr, len, type, vec, ind);
+ if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
+ break;
+ }
+ if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
+ break;
+ }
+ if (os_add_overflow(totalLength, len, &totalLength)) {
+ break;
+ }
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
+ ppnum_t highPage = atop_64(addr + len - 1);
+ if (highPage > _highestPage) {
+ _highestPage = highPage;
+ }
+ }
+ }
+ if ((ind < count)
+ || (totalLength != ((IOByteCount) totalLength))) {
+ return false; /* overflow */
+ }
+ _length = totalLength;
+ _pages = pages;
+
+ // Auto-prepare memory at creation time.
+ // Implied completion when the descriptor is freed
+
+
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
+ _wireCount++; // Physical MDs are, by definition, wired
+ } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
+ ioGMDData *dataP;
+ unsigned dataSize;
+
+ if (_pages > atop_64(max_mem)) {
+ return false;
+ }
+
+ dataSize = computeDataSize(_pages, /* upls */ count * 2);
+ if (!initMemoryEntries(dataSize, mapper)) {
+ return false;
+ }
+ dataP = getDataP(_memoryEntries);
+ dataP->fPageCnt = _pages;
+
+ if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
+ && (VM_KERN_MEMORY_NONE == _kernelTag)) {
+ _kernelTag = IOMemoryTag(kernel_map);
+ if (_kernelTag == gIOSurfaceTag) {
+ _userTag = VM_MEMORY_IOSURFACE;
+ }
+ }
+
+ if ((kIOMemoryPersistent & _flags) && !_memRef) {
+ IOReturn
+ err = memoryReferenceCreate(0, &_memRef);
+ if (kIOReturnSuccess != err) {
+ return false;
+ }
+ }
+
+ if ((_flags & kIOMemoryAutoPrepare)
+ && prepare() != kIOReturnSuccess) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+/*
+ * free
+ *
+ * Free resources.
+ */
+void
+IOGeneralMemoryDescriptor::free()
+{
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+
+ if (reserved) {
+ LOCK;
+ reserved->dp.memory = NULL;
+ UNLOCK;
+ }
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
+ ioGMDData * dataP;
+ if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
+ dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
+ dataP->fMappedBaseValid = dataP->fMappedBase = 0;
+ }
+ } else {
+ while (_wireCount) {
+ complete();
+ }
+ }
+
+ if (_memoryEntries) {
+ _memoryEntries->release();
+ }
+
+ if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
+ if (kIOMemoryTypeUIO == type) {
+ uio_free((uio_t) _ranges.v);
+ }
+#ifndef __LP64__
+ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
+ IODelete(_ranges.v64, IOAddressRange, _rangesCount);
+ }
+#endif /* !__LP64__ */
+ else {
+ IODelete(_ranges.v, IOVirtualRange, _rangesCount);
+ }
+
+ _ranges.v = NULL;
+ }
+
+ if (reserved) {
+ cleanKernelReserved(reserved);
+ if (reserved->dp.devicePager) {
+ // memEntry holds a ref on the device pager which owns reserved
+ // (IOMemoryDescriptorReserved) so no reserved access after this point
+ device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
+ } else {
+ IODelete(reserved, IOMemoryDescriptorReserved, 1);
+ }
+ reserved = NULL;
+ }
+
+ if (_memRef) {
+ memoryReferenceRelease(_memRef);
+ }
+ if (_prepareLock) {
+ IOLockFree(_prepareLock);
+ }
+
+ super::free();
+}
+
+#ifndef __LP64__
+void
+IOGeneralMemoryDescriptor::unmapFromKernel()
+{
+ panic("IOGMD::unmapFromKernel deprecated");
+}
+
+void
+IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
+{
+ panic("IOGMD::mapIntoKernel deprecated");
+}
+#endif /* !__LP64__ */
+
+/*
+ * getDirection:
+ *
+ * Get the direction of the transfer.
+ */
+IODirection
+IOMemoryDescriptor::getDirection() const
+{
+#ifndef __LP64__
+ if (_direction) {
+ return _direction;
+ }
+#endif /* !__LP64__ */
+ return (IODirection) (_flags & kIOMemoryDirectionMask);
+}
+
+/*
+ * getLength:
+ *
+ * Get the length of the transfer (over all ranges).
+ */
+IOByteCount
+IOMemoryDescriptor::getLength() const
+{
+ return _length;
+}
+
+void
+IOMemoryDescriptor::setTag( IOOptionBits tag )
+{
+ _tag = tag;
+}
+
+IOOptionBits
+IOMemoryDescriptor::getTag( void )
+{
+ return _tag;
+}
+
+uint64_t
+IOMemoryDescriptor::getFlags(void)
+{
+ return _flags;
+}
+
+#ifndef __LP64__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+
+ // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
+IOPhysicalAddress
+IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
+{
+ addr64_t physAddr = 0;
+
+ if (prepare() == kIOReturnSuccess) {
+ physAddr = getPhysicalSegment64( offset, length );
+ complete();
+ }
+
+ return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
+}
+
+#pragma clang diagnostic pop
+
+#endif /* !__LP64__ */
+
+IOByteCount
+IOMemoryDescriptor::readBytes
+(IOByteCount offset, void *bytes, IOByteCount length)
+{
+ addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
+ IOByteCount remaining;
+
+ // Assert that this entire I/O is within the available range
+ assert(offset <= _length);
+ assert(offset + length <= _length);
+ if ((offset >= _length)
+ || ((offset + length) > _length)) {
+ return 0;
+ }
+
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return 0;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }
+
+ remaining = length = min(length, _length - offset);
+ while (remaining) { // (process another target segment?)
+ addr64_t srcAddr64;
+ IOByteCount srcLen;
+
+ srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
+ if (!srcAddr64) {
+ break;
+ }
+
+ // Clip segment length to remaining
+ if (srcLen > remaining) {
+ srcLen = remaining;
+ }
+
+ copypv(srcAddr64, dstAddr, srcLen,
+ cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
+
+ dstAddr += srcLen;
+ offset += srcLen;
+ remaining -= srcLen;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }
+
+ assert(!remaining);
+
+ return length - remaining;
+}
+
+IOByteCount
+IOMemoryDescriptor::writeBytes
+(IOByteCount inoffset, const void *bytes, IOByteCount length)
+{
+ addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
+ IOByteCount remaining;
+ IOByteCount offset = inoffset;
+
+ // Assert that this entire I/O is within the available range
+ assert(offset <= _length);
+ assert(offset + length <= _length);
+
+ assert( !(kIOMemoryPreparedReadOnly & _flags));
+
+ if ((kIOMemoryPreparedReadOnly & _flags)
+ || (offset >= _length)
+ || ((offset + length) > _length)) {
+ return 0;
+ }
+
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return 0;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }
+
+ remaining = length = min(length, _length - offset);
+ while (remaining) { // (process another target segment?)
+ addr64_t dstAddr64;
+ IOByteCount dstLen;
+
+ dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
+ if (!dstAddr64) {
+ break;
+ }
+
+ // Clip segment length to remaining
+ if (dstLen > remaining) {
+ dstLen = remaining;
+ }
+
+ if (!srcAddr) {
+ bzero_phys(dstAddr64, dstLen);
+ } else {
+ copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
+ cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
+ srcAddr += dstLen;
+ }
+ offset += dstLen;
+ remaining -= dstLen;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }
+
+ assert(!remaining);
+
+#if defined(__x86_64__)
+ // copypv does not cppvFsnk on intel
+#else
+ if (!srcAddr) {
+ performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
+ }
+#endif
+
+ return length - remaining;
+}
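+
+/*
+ * Illustrative sketch only (not part of the build): moving data between a
+ * descriptor and a kernel buffer with readBytes()/writeBytes(). The
+ * descriptor and buffer names are hypothetical; both methods return the
+ * number of bytes actually copied.
+ *
+ *   uint8_t local[64];
+ *   IOByteCount copied = md->readBytes(0, local, sizeof(local));
+ *   if (copied == sizeof(local)) {
+ *       // ... examine or patch the data ...
+ *       md->writeBytes(0, local, sizeof(local));
+ *   }
+ */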
+
+#ifndef __LP64__
+void
+IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
+{
+ panic("IOGMD::setPosition deprecated");
+}
+#endif /* !__LP64__ */
+
+static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
+
+uint64_t
+IOGeneralMemoryDescriptor::getPreparationID( void )
+{
+ ioGMDData *dataP;
+
+ if (!_wireCount) {
+ return kIOPreparationIDUnprepared;
+ }
+
+ if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
+ || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
+ IOMemoryDescriptor::setPreparationID();
+ return IOMemoryDescriptor::getPreparationID();
+ }
+
+ if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
+ return kIOPreparationIDUnprepared;
+ }
+
+ if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
+ SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
+ OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
+ }
+ return dataP->fPreparationID;
+}
+
+void
+IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
+{
+ if (reserved->creator) {
+ task_deallocate(reserved->creator);
+ reserved->creator = NULL;
+ }
+}
+
+IOMemoryDescriptorReserved *
+IOMemoryDescriptor::getKernelReserved( void )
+{
+ if (!reserved) {
+ reserved = IONewZero(IOMemoryDescriptorReserved, 1);
+ }
+ return reserved;
+}
+
+void
+IOMemoryDescriptor::setPreparationID( void )
+{
+ if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
+ SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
+ OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
+ }
+}
+
+uint64_t
+IOMemoryDescriptor::getPreparationID( void )
+{
+ if (reserved) {
+ return reserved->preparationID;
+ } else {
+ return kIOPreparationIDUnsupported;
+ }
+}
+
+void
+IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
+{
+ _kernelTag = (vm_tag_t) kernelTag;
+ _userTag = (vm_tag_t) userTag;
+}
+
+uint32_t
+IOMemoryDescriptor::getVMTag(vm_map_t map)
+{
+ if (vm_kernel_map_is_kernel(map)) {
+ if (VM_KERN_MEMORY_NONE != _kernelTag) {
+ return (uint32_t) _kernelTag;
+ }
+ } else {
+ if (VM_KERN_MEMORY_NONE != _userTag) {
+ return (uint32_t) _userTag;
+ }
+ }
+ return IOMemoryTag(map);
+}
+
+IOReturn
+IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
+{
+ IOReturn err = kIOReturnSuccess;
+ DMACommandOps params;
+ IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
+ ioGMDData *dataP;
+
+ params = (op & ~kIOMDDMACommandOperationMask & op);
+ op &= kIOMDDMACommandOperationMask;
+
+ if (kIOMDDMAMap == op) {
+ if (dataSize < sizeof(IOMDDMAMapArgs)) {
+ return kIOReturnUnderrun;
+ }
+
+ IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
+
+ if (!_memoryEntries
+ && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
+ return kIOReturnNoMemory;
+ }
+
+ if (_memoryEntries && data->fMapper) {
+ bool remap, keepMap;
+ dataP = getDataP(_memoryEntries);
+
+ if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
+ dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
+ }
+ if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
+ dataP->fDMAMapAlignment = data->fMapSpec.alignment;
+ }
+
+ keepMap = (data->fMapper == gIOSystemMapper);
+ keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
+
+ if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
+ IOLockLock(_prepareLock);
+ }
+
+ remap = (!keepMap);
+ remap |= (dataP->fDMAMapNumAddressBits < 64)
+ && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
+ remap |= (dataP->fDMAMapAlignment > page_size);
+
+ if (remap || !dataP->fMappedBaseValid) {
+// if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
+ err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
+ if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
+ dataP->fMappedBase = data->fAlloc;
+ dataP->fMappedBaseValid = true;
+ dataP->fMappedLength = data->fAllocLength;
+ data->fAllocLength = 0; // IOMD owns the alloc now
+ }
+ } else {
+ data->fAlloc = dataP->fMappedBase;
+ data->fAllocLength = 0; // give out IOMD map
+ md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
+ }
+ data->fMapContig = !dataP->fDiscontig;
+
+ if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
+ IOLockUnlock(_prepareLock);
+ }
+ }
+ return err;
+ }
+ if (kIOMDDMAUnmap == op) {
+ if (dataSize < sizeof(IOMDDMAMapArgs)) {
+ return kIOReturnUnderrun;
+ }
+ IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
+
+ err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
+
+ return kIOReturnSuccess;
+ }
+
+ if (kIOMDAddDMAMapSpec == op) {
+ if (dataSize < sizeof(IODMAMapSpecification)) {
+ return kIOReturnUnderrun;
+ }
+
+ IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
+
+ if (!_memoryEntries
+ && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
+ return kIOReturnNoMemory;
+ }
+
+ if (_memoryEntries) {
+ dataP = getDataP(_memoryEntries);
+ if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
+ dataP->fDMAMapNumAddressBits = data->numAddressBits;
+ }
+ if (data->alignment > dataP->fDMAMapAlignment) {
+ dataP->fDMAMapAlignment = data->alignment;
+ }
+ }
+ return kIOReturnSuccess;
+ }
+
+ if (kIOMDGetCharacteristics == op) {
+ if (dataSize < sizeof(IOMDDMACharacteristics)) {
+ return kIOReturnUnderrun;
+ }
+
+ IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
+ data->fLength = _length;
+ data->fSGCount = _rangesCount;
+ data->fPages = _pages;
+ data->fDirection = getDirection();
+ if (!_wireCount) {
+ data->fIsPrepared = false;
+ } else {
+ data->fIsPrepared = true;
+ data->fHighestPage = _highestPage;
+ if (_memoryEntries) {
+ dataP = getDataP(_memoryEntries);
+ ioPLBlock *ioplList = getIOPLList(dataP);
+ UInt count = getNumIOPL(_memoryEntries, dataP);
+ if (count == 1) {
+ data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
+ }
+ }
+ }
+
+ return kIOReturnSuccess;
+ } else if (kIOMDDMAActive == op) {
+ if (params) {
+ int16_t prior;
+ prior = OSAddAtomic16(1, &md->_dmaReferences);
+ if (!prior) {
+ md->_mapName = NULL;
+ }
+ } else {
+ if (md->_dmaReferences) {
+ OSAddAtomic16(-1, &md->_dmaReferences);
+ } else {
+ panic("_dmaReferences underflow");
+ }
+ }
+ } else if (kIOMDWalkSegments != op) {
+ return kIOReturnBadArgument;
+ }
+
+ // Get the next segment
+ struct InternalState {
+ IOMDDMAWalkSegmentArgs fIO;
+ mach_vm_size_t fOffset2Index;
+ mach_vm_size_t fNextOffset;
+ UInt fIndex;
+ } *isP;
+
+ // Find the next segment
+ if (dataSize < sizeof(*isP)) {
+ return kIOReturnUnderrun;
+ }
+
+ isP = (InternalState *) vData;
+ mach_vm_size_t offset = isP->fIO.fOffset;
+ uint8_t mapped = isP->fIO.fMapped;
+ uint64_t mappedBase;
+
+ if (mapped && (kIOMemoryRemote & _flags)) {
+ return kIOReturnNotAttached;
+ }
+
+ if (IOMapper::gSystem && mapped
+ && (!(kIOMemoryHostOnly & _flags))
+ && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
+// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
+ if (!_memoryEntries
+ && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
+ return kIOReturnNoMemory;
+ }
+
+ dataP = getDataP(_memoryEntries);
+ if (dataP->fMapper) {
+ IODMAMapSpecification mapSpec;
+ bzero(&mapSpec, sizeof(mapSpec));
+ mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
+ mapSpec.alignment = dataP->fDMAMapAlignment;
+ err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
+ if (kIOReturnSuccess != err) {
+ return err;
+ }
+ dataP->fMappedBaseValid = true;
+ }
+ }
+
+ if (kIOMDDMAWalkMappedLocal == mapped) {
+ mappedBase = isP->fIO.fMappedBase;
+ } else if (mapped) {
+ if (IOMapper::gSystem
+ && (!(kIOMemoryHostOnly & _flags))
+ && _memoryEntries
+ && (dataP = getDataP(_memoryEntries))
+ && dataP->fMappedBaseValid) {
+ mappedBase = dataP->fMappedBase;
+ } else {
+ mapped = 0;
+ }
+ }
+
+ if (offset >= _length) {
+ return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
+ }
+
+ // Validate the previous offset
+ UInt ind;
+ mach_vm_size_t off2Ind = isP->fOffset2Index;
+ if (!params
+ && offset
+ && (offset == isP->fNextOffset || off2Ind <= offset)) {
+ ind = isP->fIndex;
+ } else {
+ ind = off2Ind = 0; // Start from beginning
+ }
+ mach_vm_size_t length;
+ UInt64 address;
+
+ if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
+ // Physical address based memory descriptor
+ const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
+
+ // Find the range after the one that contains the offset
+ mach_vm_size_t len;
+ for (len = 0; off2Ind <= offset; ind++) {
+ len = physP[ind].length;
+ off2Ind += len;
+ }
+
+ // Calculate length within range and starting address
+ length = off2Ind - offset;
+ address = physP[ind - 1].address + len - length;
+
+ if (true && mapped) {
+ address = mappedBase + offset;
+ } else {
+ // see how far we can coalesce ranges
+ while (ind < _rangesCount && address + length == physP[ind].address) {
+ len = physP[ind].length;
+ length += len;
+ off2Ind += len;
+ ind++;
+ }
+ }
+
+ // correct contiguous check overshoot
+ ind--;
+ off2Ind -= len;
+ }
+#ifndef __LP64__
+ else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
+ // Physical address based memory descriptor
+ const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
+
+ // Find the range after the one that contains the offset
+ mach_vm_size_t len;
+ for (len = 0; off2Ind <= offset; ind++) {
+ len = physP[ind].length;
+ off2Ind += len;
+ }
+
+ // Calculate length within range and starting address
+ length = off2Ind - offset;
+ address = physP[ind - 1].address + len - length;
+
+ if (true && mapped) {
+ address = mappedBase + offset;
+ } else {
+ // see how far we can coalesce ranges
+ while (ind < _rangesCount && address + length == physP[ind].address) {
+ len = physP[ind].length;
+ length += len;
+ off2Ind += len;
+ ind++;
+ }
+ }
+ // correct contiguous check overshoot
+ ind--;
+ off2Ind -= len;
+ }
+#endif /* !__LP64__ */
+ else {
+ do {
+ if (!_wireCount) {
+ panic("IOGMD: not wired for the IODMACommand");
+ }
+
+ assert(_memoryEntries);
+
+ dataP = getDataP(_memoryEntries);
+ const ioPLBlock *ioplList = getIOPLList(dataP);
+ UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
+ upl_page_info_t *pageList = getPageList(dataP);
+
+ assert(numIOPLs > 0);
+
+ // Scan through iopl info blocks looking for block containing offset
+ while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
+ ind++;
+ }
+
+ // Go back to actual range as search goes past it
+ ioPLBlock ioplInfo = ioplList[ind - 1];
+ off2Ind = ioplInfo.fIOMDOffset;
+
+ if (ind < numIOPLs) {
+ length = ioplList[ind].fIOMDOffset;
+ } else {
+ length = _length;
+ }
+ length -= offset; // Remainder within iopl
+
+ // Subtract this iopl's starting offset within the total list
+ offset -= off2Ind;
+
+ // If a mapped address is requested and this is a pre-mapped IOPL
+ // then we just need to compute an offset relative to the mapped base.
+ if (mapped) {
+ offset += (ioplInfo.fPageOffset & PAGE_MASK);
+ address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
+ continue; // Done; leave the do/while(false) now
+ }
+
+ // The offset is rebased into the current iopl.
+ // Now add the iopl 1st page offset.
+ offset += ioplInfo.fPageOffset;
+
+ // For external UPLs the fPageInfo field points directly to
+ // the upl's upl_page_info_t array.
+ if (ioplInfo.fFlags & kIOPLExternUPL) {
+ pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
+ } else {
+ pageList = &pageList[ioplInfo.fPageInfo];
+ }
+
+ // Check for direct device non-paged memory
+ if (ioplInfo.fFlags & kIOPLOnDevice) {
+ address = ptoa_64(pageList->phys_addr) + offset;
+ continue; // Done; leave the do/while(false) now
+ }
+
+ // Now we need to compute the index into the pageList
+ UInt pageInd = atop_32(offset);
+ offset &= PAGE_MASK;
+
+ // Compute the starting address of this segment
+ IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
+ if (!pageAddr) {
+ panic("!pageList phys_addr");
+ }
+
+ address = ptoa_64(pageAddr) + offset;
+
+ // length is currently set to the length of the remainder of the iopl.
+ // We need to check that the remainder of the iopl is contiguous.
+ // This is indicated by pageList[pageInd].phys_addr being sequential.
+ IOByteCount contigLength = PAGE_SIZE - offset;
+ while (contigLength < length
+ && ++pageAddr == pageList[++pageInd].phys_addr) {
+ contigLength += PAGE_SIZE;
+ }
+
+ if (contigLength < length) {
+ length = contigLength;
+ }
+
+
+ assert(address);
+ assert(length);
+ } while (false);
+ }
+
+ // Update return values and state
+ isP->fIO.fIOVMAddr = address;
+ isP->fIO.fLength = length;
+ isP->fIndex = ind;
+ isP->fOffset2Index = off2Ind;
+ isP->fNextOffset = isP->fIO.fOffset + length;
+
+ return kIOReturnSuccess;
+}
+
+addr64_t
+IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
+{
+ IOReturn ret;
+ mach_vm_address_t address = 0;
+ mach_vm_size_t length = 0;
+ IOMapper * mapper = gIOSystemMapper;
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+
+ if (lengthOfSegment) {
+ *lengthOfSegment = 0;
+ }
+
+ if (offset >= _length) {
+ return 0;
+ }
+
+ // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
+ // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
+ // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
+ // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
+
+ if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
+ unsigned rangesIndex = 0;
+ Ranges vec = _ranges;
+ mach_vm_address_t addr;
+
+ // Find starting address within the vector of ranges
+ for (;;) {
+ getAddrLenForInd(addr, length, type, vec, rangesIndex);
+ if (offset < length) {
+ break;
+ }
+ offset -= length; // (make offset relative)
+ rangesIndex++;
+ }
+
+ // Now that we have the starting range,
+ // lets find the last contiguous range
+ addr += offset;
+ length -= offset;
+
+ for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
+ mach_vm_address_t newAddr;
+ mach_vm_size_t newLen;
+
+ getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
+ if (addr + length != newAddr) {
+ break;
+ }
+ length += newLen;
+ }
+ if (addr) {
+ address = (IOPhysicalAddress) addr; // Truncate address to 32bit
+ }
+ } else {
+ IOMDDMAWalkSegmentState _state;
+ IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
+
+ state->fOffset = offset;
+ state->fLength = _length - offset;
+ state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
+
+ ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
+
+ if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
+ DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
+ ret, this, state->fOffset,
+ state->fIOVMAddr, state->fLength);
+ }
+ if (kIOReturnSuccess == ret) {
+ address = state->fIOVMAddr;
+ length = state->fLength;
+ }
+
+ // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
+ // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
+
+ if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
+ if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
+ addr64_t origAddr = address;
+ IOByteCount origLen = length;
+
+ address = mapper->mapToPhysicalAddress(origAddr);
+ length = page_size - (address & (page_size - 1));
+ while ((length < origLen)
+ && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
+ length += page_size;
+ }
+ if (length > origLen) {
+ length = origLen;
+ }
+ }
+ }
+ }
+
+ if (!address) {
+ length = 0;
+ }
+
+ if (lengthOfSegment) {
+ *lengthOfSegment = length;
+ }
+
+ return address;
+}
+
+#ifndef __LP64__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+
+addr64_t
+IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
+{
+ addr64_t address = 0;
+
+ if (options & _kIOMemorySourceSegment) {
+ address = getSourceSegment(offset, lengthOfSegment);
+ } else if (options & kIOMemoryMapperNone) {
+ address = getPhysicalSegment64(offset, lengthOfSegment);
+ } else {
+ address = getPhysicalSegment(offset, lengthOfSegment);
+ }
+
+ return address;
+}
+#pragma clang diagnostic pop
+
+addr64_t
+IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
+}
+
+IOPhysicalAddress
+IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ addr64_t address = 0;
+ IOByteCount length = 0;
+
+ address = getPhysicalSegment(offset, lengthOfSegment, 0);
+
+ if (lengthOfSegment) {
+ length = *lengthOfSegment;
+ }
+
+ if ((address + length) > 0x100000000ULL) {
+ panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
+ address, (long) length, (getMetaClass())->getClassName());
+ }
+
+ return (IOPhysicalAddress) address;
+}
+
+addr64_t
+IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ IOPhysicalAddress phys32;
+ IOByteCount length;
+ addr64_t phys64;
+ IOMapper * mapper = NULL;
+
+ phys32 = getPhysicalSegment(offset, lengthOfSegment);
+ if (!phys32) {
+ return 0;
+ }
+
+ if (gIOSystemMapper) {
+ mapper = gIOSystemMapper;
+ }
+
+ if (mapper) {
+ IOByteCount origLen;
+
+ phys64 = mapper->mapToPhysicalAddress(phys32);
+ origLen = *lengthOfSegment;
+ length = page_size - (phys64 & (page_size - 1));
+ while ((length < origLen)
+ && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
+ length += page_size;
+ }
+ if (length > origLen) {
+ length = origLen;
+ }
+
+ *lengthOfSegment = length;
+ } else {
+ phys64 = (addr64_t) phys32;
+ }
+
+ return phys64;
+}
+
+IOPhysicalAddress
+IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
+}
+
+IOPhysicalAddress
+IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
+}
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+
+void *
+IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
+ IOByteCount * lengthOfSegment)
+{
+ if (_task == kernel_task) {
+ return (void *) getSourceSegment(offset, lengthOfSegment);
+ } else {
+ panic("IOGMD::getVirtualSegment deprecated");
+ }
+
+ return NULL;
+}
+#pragma clang diagnostic pop
+#endif /* !__LP64__ */
+
+IOReturn
+IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
+{
+ IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
+ DMACommandOps params;
+ IOReturn err;
+
+ params = (op & ~kIOMDDMACommandOperationMask & op);
+ op &= kIOMDDMACommandOperationMask;
+
+ if (kIOMDGetCharacteristics == op) {
+ if (dataSize < sizeof(IOMDDMACharacteristics)) {
+ return kIOReturnUnderrun;
+ }
+
+ IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
+ data->fLength = getLength();
+ data->fSGCount = 0;
+ data->fDirection = getDirection();
+ data->fIsPrepared = true; // Assume prepared - fails safe
+ } else if (kIOMDWalkSegments == op) {
+ if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
+ return kIOReturnUnderrun;
+ }
+
+ IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
+ IOByteCount offset = (IOByteCount) data->fOffset;
+
+ IOPhysicalLength length;
+ if (data->fMapped && IOMapper::gSystem) {
+ data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
+ } else {
+ data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
+ }
+ data->fLength = length;
+ } else if (kIOMDAddDMAMapSpec == op) {
+ return kIOReturnUnsupported;
+ } else if (kIOMDDMAMap == op) {
+ if (dataSize < sizeof(IOMDDMAMapArgs)) {
+ return kIOReturnUnderrun;
+ }
+ IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
+
+ if (params) {
+ panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
+ }
+
+ data->fMapContig = true;
+ err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
+
+ return err;
+ } else if (kIOMDDMAUnmap == op) {
+ if (dataSize < sizeof(IOMDDMAMapArgs)) {
+ return kIOReturnUnderrun;
+ }
+ IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
+
+ err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
+
+ return kIOReturnSuccess;
+ } else {
+ return kIOReturnBadArgument;
+ }
+
+ return kIOReturnSuccess;
+}
+
+IOReturn
+IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
+ IOOptionBits * oldState )
+{
+ IOReturn err = kIOReturnSuccess;
+
+ vm_purgable_t control;
+ int state;
+
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return kIOReturnNotAttached;
+ }
+
+ if (_memRef) {
+ err = super::setPurgeable(newState, oldState);
+ } else {
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }
+ do{
+ // Find the appropriate vm_map for the given task
+ vm_map_t curMap;
+ if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
+ err = kIOReturnNotReady;
+ break;
+ } else if (!_task) {
+ err = kIOReturnUnsupported;
+ break;
+ } else {
+ curMap = get_task_map(_task);
+ if (NULL == curMap) {
+ err = KERN_INVALID_ARGUMENT;
+ break;
+ }
+ }
+
+ // can only do one range
+ Ranges vec = _ranges;
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+ mach_vm_address_t addr;
+ mach_vm_size_t len;
+ getAddrLenForInd(addr, len, type, vec, 0);
+
+ err = purgeableControlBits(newState, &control, &state);
+ if (kIOReturnSuccess != err) {
+ break;
+ }
+ err = vm_map_purgable_control(curMap, addr, control, &state);
+ if (oldState) {
+ if (kIOReturnSuccess == err) {
+ err = purgeableStateBits(&state);
+ *oldState = state;
+ }
+ }
+ }while (false);
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }
+ }
+
+ return err;
+}
+
+IOReturn
+IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
+ IOOptionBits * oldState )
+{
+ IOReturn err = kIOReturnNotReady;
+
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }
+ if (_memRef) {
+ err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
+ }
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }
+
+ return err;
+}
+
+IOReturn
+IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
+ int newLedgerTag,
+ IOOptionBits newLedgerOptions )
+{
+ IOReturn err = kIOReturnSuccess;
+
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return kIOReturnNotAttached;
+ }
+
+ if (iokit_iomd_setownership_enabled == FALSE) {
+ return kIOReturnUnsupported;
+ }
+
+ if (_memRef) {
+ err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
+ } else {
+ err = kIOReturnUnsupported;
+ }
+
+ return err;
+}
+
+IOReturn
+IOMemoryDescriptor::setOwnership( task_t newOwner,
+ int newLedgerTag,
+ IOOptionBits newLedgerOptions )
+{
+ IOReturn err = kIOReturnNotReady;
+
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return kIOReturnNotAttached;
+ }
+
+ if (iokit_iomd_setownership_enabled == FALSE) {
+ return kIOReturnUnsupported;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }
+ if (_memRef) {
+ err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
+ } else {
+ IOMultiMemoryDescriptor * mmd;
+ IOSubMemoryDescriptor * smd;
+ if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
+ err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
+ } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
+ err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
+ }
+ }
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }
+
+ return err;
+}