+ if (KERN_SUCCESS == err) {
+ addr = ref.mapped;
+ map = ref.map;
+ didAlloc = true;
+ }
+#if LOGUNALIGN
+ IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
+#endif
+ }
+
+ /*
+ * If the memory is associated with a device pager but doesn't have a UPL,
+ * it will be immediately faulted in through the pager via populateDevicePager().
+ * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
+ * operations.
+ */
+ if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
+ options &= ~kIOMapPrefault;
+ }
+
+ /*
+ * Prefaulting is only possible if we wired the memory earlier. Check the
+ * memory type, and the underlying data.
+ */
+ if (options & kIOMapPrefault) {
+ /*
+ * The memory must have been wired by calling ::prepare(), otherwise
+ * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
+ */
+ assert(_wireCount != 0);
+ assert(_memoryEntries != NULL);
+ if ((_wireCount == 0) ||
+ (_memoryEntries == NULL)) {
+ return kIOReturnBadArgument;
+ }
+
+ // Get the page list.
+ ioGMDData* dataP = getDataP(_memoryEntries);
+ ioPLBlock const* ioplList = getIOPLList(dataP);
+ pageList = getPageList(dataP);
+
+ // Get the number of IOPLs.
+ UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
+
+ /*
+ * Scan through the IOPL Info Blocks, looking for the first block containing
+ * the offset. The search will go past it, so we'll need to go back to the
+ * right range at the end.
+ */
+ UInt ioplIndex = 0;
+ while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
+ ioplIndex++;
+ }
+ ioplIndex--;
+
+ // Retrieve the IOPL info block.
+ ioPLBlock ioplInfo = ioplList[ioplIndex];
+
+ /*
+ * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
+ * array.
+ */
+ if (ioplInfo.fFlags & kIOPLExternUPL) {
+ pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
+ } else {
+ pageList = &pageList[ioplInfo.fPageInfo];
+ }
+
+ // Rebase [offset] into the IOPL in order to look up the first page index.
+ mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
+
+ // Retrieve the index of the first page corresponding to the offset.
+ currentPageIndex = atop_32(offsetInIOPL);
+ }
+
+ // enter mappings
+ remain = size;
+ mapAddr = addr;
+ entryIdx = firstEntryIdx;
+ entry = &ref->entries[entryIdx];
+
+ while (remain && (KERN_SUCCESS == err)) {
+#if LOGUNALIGN
+ printf("offset %qx, %qx\n", offset, entry->offset);
+#endif
+ if (kIODefaultCache != cacheMode) {
+ vm_size_t unused = 0;
+ err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
+ memEntryCacheMode, NULL, entry->entry);
+ assert(KERN_SUCCESS == err);
+ }
+ entryOffset = offset - entry->offset;
+ if (entryOffset >= entry->size) {
+ panic("entryOffset");
+ }
+ chunk = entry->size - entryOffset;
+#if LOGUNALIGN
+ printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
+#endif
+ if (chunk) {
+ vm_map_kernel_flags_t vmk_flags;
+
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
+
+ if (chunk > remain) {
+ chunk = remain;
+ }
+ mapAddrOut = mapAddr;
+ if (options & kIOMapPrefault) {
+ UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
+
+ err = vm_map_enter_mem_object_prefault(map,
+ &mapAddrOut,
+ chunk, 0 /* mask */,
+ (VM_FLAGS_FIXED
+ | VM_FLAGS_OVERWRITE
+ | VM_FLAGS_RETURN_DATA_ADDR),
+ vmk_flags,
+ tag,
+ entry->entry,
+ entryOffset,
+ prot, // cur
+ prot, // max
+ &pageList[currentPageIndex],
+ nb_pages);
+
+ // Compute the next index in the page list.
+ currentPageIndex += nb_pages;
+ assert(currentPageIndex <= _pages);
+ } else {
+#if LOGUNALIGN
+ printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
+#endif
+ err = vm_map_enter_mem_object(map,
+ &mapAddrOut,
+ chunk, 0 /* mask */,
+ (VM_FLAGS_FIXED
+ | VM_FLAGS_OVERWRITE
+ | VM_FLAGS_RETURN_DATA_ADDR),
+ vmk_flags,
+ tag,
+ entry->entry,
+ entryOffset,
+ false, // copy
+ prot, // cur
+ prot, // max
+ VM_INHERIT_NONE);
+ }
+ if (KERN_SUCCESS != err) {
+ panic("map enter err %x", err);
+ break;
+ }
+#if LOGUNALIGN
+ printf("mapAddr o %qx\n", mapAddrOut);
+#endif
+ if (entryIdx == firstEntryIdx) {
+ addr = mapAddrOut;
+ }
+ remain -= chunk;
+ if (!remain) {
+ break;
+ }
+ mach_vm_size_t entrySize;
+ err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
+ assert(KERN_SUCCESS == err);
+ mapAddr += entrySize;
+ offset += chunk;
+ }
+
+ entry++;
+ entryIdx++;
+ if (entryIdx >= ref->count) {
+ err = kIOReturnOverrun;
+ break;
+ }
+ }
+
+ if (KERN_SUCCESS != err) {
+ DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
+ }
+
+ if ((KERN_SUCCESS != err) && didAlloc) {
+ (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
+ addr = 0;
+ }
+ *inaddr = addr;
+
+ return err;
+}
+
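+/*
+ * memoryReferenceGetDMAMapLength:
+ *
+ * Sum the page-rounded sizes of every entry in the memory reference to get
+ * the length a DMA mapping of it would occupy; optionally returns the first
+ * entry's offset within its first physical page.
+ */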
+uint64_t
+IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
+ IOMemoryReference * ref,
+ uint64_t * offset)
+{
+ kern_return_t kr;
+ vm_object_offset_t data_offset = 0;
+ uint64_t total;
+ uint32_t idx;
+
+ assert(ref->count);
+ if (offset) {
+ *offset = (uint64_t) data_offset;
+ }
+ total = 0;
+ for (idx = 0; idx < ref->count; idx++) {
+ kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
+ &data_offset);
+ if (KERN_SUCCESS != kr) {
+ DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
+ } else if (0 != data_offset) {
+ DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[idx].entry, data_offset, kr);
+ }
+ if (offset && !idx) {
+ *offset = (uint64_t) data_offset;
+ }
+ total += round_page(data_offset + ref->entries[idx].size);
+ }
+
+ DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
+ (offset ? *offset : (vm_object_offset_t)-1), total);
+
+ return total;
+}
+
+
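+/*
+ * memoryReferenceGetPageCounts:
+ *
+ * Total the resident and dirty page counts reported by each memory entry in
+ * the reference, stopping at the first error.
+ */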
+IOReturn
+IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
+ IOMemoryReference * ref,
+ IOByteCount * residentPageCount,
+ IOByteCount * dirtyPageCount)
+{
+ IOReturn err;
+ IOMemoryEntry * entries;
+ unsigned int resident, dirty;
+ unsigned int totalResident, totalDirty;
+
+ totalResident = totalDirty = 0;
+ err = kIOReturnSuccess;
+ entries = ref->entries + ref->count;
+ while (entries > &ref->entries[0]) {
+ entries--;
+ err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
+ if (KERN_SUCCESS != err) {
+ break;
+ }
+ totalResident += resident;
+ totalDirty += dirty;
+ }
+
+ if (residentPageCount) {
+ *residentPageCount = totalResident;
+ }
+ if (dirtyPageCount) {
+ *dirtyPageCount = totalDirty;
+ }
+ return err;
+}
+
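+/*
+ * memoryReferenceSetPurgeable:
+ *
+ * Apply the requested purgeable state to every entry in the reference and
+ * collapse the per-entry previous states into a single aggregate old state
+ * (empty takes precedence over volatile, volatile over non-volatile).
+ */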
+IOReturn
+IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
+ IOMemoryReference * ref,
+ IOOptionBits newState,
+ IOOptionBits * oldState)
+{
+ IOReturn err;
+ IOMemoryEntry * entries;
+ vm_purgable_t control;
+ int totalState, state;
+
+ totalState = kIOMemoryPurgeableNonVolatile;
+ err = kIOReturnSuccess;
+ entries = ref->entries + ref->count;
+ while (entries > &ref->entries[0]) {
+ entries--;
+
+ err = purgeableControlBits(newState, &control, &state);
+ if (KERN_SUCCESS != err) {
+ break;
+ }
+ err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
+ if (KERN_SUCCESS != err) {
+ break;
+ }
+ err = purgeableStateBits(&state);
+ if (KERN_SUCCESS != err) {
+ break;
+ }
+
+ if (kIOMemoryPurgeableEmpty == state) {
+ totalState = kIOMemoryPurgeableEmpty;
+ } else if (kIOMemoryPurgeableEmpty == totalState) {
+ continue;
+ } else if (kIOMemoryPurgeableVolatile == totalState) {
+ continue;
+ } else if (kIOMemoryPurgeableVolatile == state) {
+ totalState = kIOMemoryPurgeableVolatile;
+ } else {
+ totalState = kIOMemoryPurgeableNonVolatile;
+ }
+ }
+
+ if (oldState) {
+ *oldState = totalState;
+ }
+ return err;
+}
+
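+/*
+ * memoryReferenceSetOwnership:
+ *
+ * Assign the new owning task, ledger tag and ledger options to every entry
+ * in the reference, returning the last error encountered.
+ */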
+IOReturn
+IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
+ IOMemoryReference * ref,
+ task_t newOwner,
+ int newLedgerTag,
+ IOOptionBits newLedgerOptions)
+{
+ IOReturn err, totalErr;
+ IOMemoryEntry * entries;
+
+ totalErr = kIOReturnSuccess;
+ entries = ref->entries + ref->count;
+ while (entries > &ref->entries[0]) {
+ entries--;
+
+ err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
+ if (KERN_SUCCESS != err) {
+ totalErr = err;
+ }
+ }
+
+ return totalErr;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+OSSharedPtr<IOMemoryDescriptor>
+IOMemoryDescriptor::withAddress(void * address,
+ IOByteCount length,
+ IODirection direction)
+{
+ return IOMemoryDescriptor::
+ withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
+}
+
+#ifndef __LP64__
+OSSharedPtr<IOMemoryDescriptor>
+IOMemoryDescriptor::withAddress(IOVirtualAddress address,
+ IOByteCount length,
+ IODirection direction,
+ task_t task)
+{
+ OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
+ if (that) {
+ if (that->initWithAddress(address, length, direction, task)) {
+ return os::move(that);
+ }
+ }
+ return nullptr;
+}
+#endif /* !__LP64__ */
+
+OSSharedPtr<IOMemoryDescriptor>
+IOMemoryDescriptor::withPhysicalAddress(
+ IOPhysicalAddress address,
+ IOByteCount length,
+ IODirection direction )
+{
+ return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
+}
+
+#ifndef __LP64__
+OSSharedPtr<IOMemoryDescriptor>
+IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
+ UInt32 withCount,
+ IODirection direction,
+ task_t task,
+ bool asReference)
+{
+ OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
+ if (that) {
+ if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
+ return os::move(that);
+ }
+ }
+ return nullptr;
+}
+#endif /* !__LP64__ */
+
+OSSharedPtr<IOMemoryDescriptor>
+IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
+ mach_vm_size_t length,
+ IOOptionBits options,
+ task_t task)
+{
+ IOAddressRange range = { address, length };
+ return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
+}
+
+OSSharedPtr<IOMemoryDescriptor>
+IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
+ UInt32 rangeCount,
+ IOOptionBits options,
+ task_t task)
+{
+ OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
+ if (that) {
+ if (task) {
+ options |= kIOMemoryTypeVirtual64;
+ } else {
+ options |= kIOMemoryTypePhysical64;
+ }
+
+ if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
+ return os::move(that);
+ }
+ }
+
+ return nullptr;
+}
+
+
+/*
+ * withOptions:
+ *
+ * Create a new IOMemoryDescriptor. The buffer is made up of several
+ * virtual address ranges, from a given task.
+ *
+ * Passing the ranges as a reference will avoid an extra allocation.
+ */
+OSSharedPtr<IOMemoryDescriptor>
+IOMemoryDescriptor::withOptions(void * buffers,
+ UInt32 count,
+ UInt32 offset,
+ task_t task,
+ IOOptionBits opts,
+ IOMapper * mapper)
+{
+ OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
+
+ if (self
+ && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
+ return nullptr;
+ }
+
+ return os::move(self);
+}
+
+bool
+IOMemoryDescriptor::initWithOptions(void * buffers,
+ UInt32 count,
+ UInt32 offset,
+ task_t task,
+ IOOptionBits options,
+ IOMapper * mapper)
+{
+ return false;
+}
+
+#ifndef __LP64__
+OSSharedPtr<IOMemoryDescriptor>
+IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
+ UInt32 withCount,
+ IODirection direction,
+ bool asReference)
+{
+ OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
+ if (that) {
+ if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
+ return os::move(that);
+ }
+ }
+ return nullptr;
+}
+
+OSSharedPtr<IOMemoryDescriptor>
+IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
+ IOByteCount offset,
+ IOByteCount length,
+ IODirection direction)
+{
+ return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
+}
+#endif /* !__LP64__ */
+
+OSSharedPtr<IOMemoryDescriptor>
+IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
+{
+ IOGeneralMemoryDescriptor *origGenMD =
+ OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
+
+ if (origGenMD) {
+ return IOGeneralMemoryDescriptor::
+ withPersistentMemoryDescriptor(origGenMD);
+ } else {
+ return nullptr;
+ }
+}
+
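+/*
+ * withPersistentMemoryDescriptor:
+ *
+ * Create a descriptor sharing the original's persistent memory reference.
+ * If the reuse check hands back the original's own reference, the original
+ * is simply retained and returned; otherwise a new descriptor is initialized
+ * with kIOMemoryTypePersistentMD.
+ */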
+OSSharedPtr<IOMemoryDescriptor>
+IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
+{
+ IOMemoryReference * memRef;
+ OSSharedPtr<IOGeneralMemoryDescriptor> self;
+
+ if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
+ return nullptr;
+ }
+
+ if (memRef == originalMD->_memRef) {
+ self.reset(originalMD, OSRetain);
+ originalMD->memoryReferenceRelease(memRef);
+ return os::move(self);
+ }
+
+ self = OSMakeShared<IOGeneralMemoryDescriptor>();
+ IOMDPersistentInitData initData = { originalMD, memRef };
+
+ if (self
+ && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
+ return nullptr;
+ }
+ return os::move(self);
+}
+
+#ifndef __LP64__
+bool
+IOGeneralMemoryDescriptor::initWithAddress(void * address,
+ IOByteCount withLength,
+ IODirection withDirection)
+{
+ _singleRange.v.address = (vm_offset_t) address;
+ _singleRange.v.length = withLength;
+
+ return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
+}
+
+bool
+IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
+ IOByteCount withLength,
+ IODirection withDirection,
+ task_t withTask)
+{
+ _singleRange.v.address = address;
+ _singleRange.v.length = withLength;
+
+ return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
+}
+
+bool
+IOGeneralMemoryDescriptor::initWithPhysicalAddress(
+ IOPhysicalAddress address,
+ IOByteCount withLength,
+ IODirection withDirection )
+{
+ _singleRange.p.address = address;
+ _singleRange.p.length = withLength;
+
+ return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
+}
+
+bool
+IOGeneralMemoryDescriptor::initWithPhysicalRanges(
+ IOPhysicalRange * ranges,
+ UInt32 count,
+ IODirection direction,
+ bool reference)
+{
+ IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
+
+ if (reference) {
+ mdOpts |= kIOMemoryAsReference;
+ }
+
+ return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
+}
+
+bool
+IOGeneralMemoryDescriptor::initWithRanges(
+ IOVirtualRange * ranges,
+ UInt32 count,
+ IODirection direction,
+ task_t task,
+ bool reference)
+{
+ IOOptionBits mdOpts = direction;
+
+ if (reference) {
+ mdOpts |= kIOMemoryAsReference;
+ }
+
+ if (task) {
+ mdOpts |= kIOMemoryTypeVirtual;
+
+ // Auto-prepare if this is a kernel memory descriptor as very few
+ // clients bother to prepare() kernel memory.
+ // But it was not enforced so what are you going to do?
+ if (task == kernel_task) {
+ mdOpts |= kIOMemoryAutoPrepare;
+ }
+ } else {
+ mdOpts |= kIOMemoryTypePhysical;
+ }
+
+ return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
+}
+#endif /* !__LP64__ */
+
+/*
+ * initWithOptions:
+ *
+ * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
+ * address ranges from a given task, several physical ranges, a UPL from the
+ * UBC system, or a uio (possibly 64-bit) from the BSD subsystem.
+ *
+ * Passing the ranges as a reference will avoid an extra allocation.
+ *
+ * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
+ * existing instance -- note this behavior is not commonly supported in other
+ * I/O Kit classes, although it is supported here.
+ */
+
+bool
+IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
+ UInt32 count,
+ UInt32 offset,
+ task_t task,
+ IOOptionBits options,
+ IOMapper * mapper)
+{
+ IOOptionBits type = options & kIOMemoryTypeMask;
+
+#ifndef __LP64__
+ if (task
+ && (kIOMemoryTypeVirtual == type)
+ && vm_map_is_64bit(get_task_map(task))
+ && ((IOVirtualRange *) buffers)->address) {
+ OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
+ return false;
+ }
+#endif /* !__LP64__ */
+
+ // Grab the original MD's configuration data to initialise the
+ // arguments to this function.
+ if (kIOMemoryTypePersistentMD == type) {
+ IOMDPersistentInitData *initData = (typeof(initData))buffers;
+ const IOGeneralMemoryDescriptor *orig = initData->fMD;
+ ioGMDData *dataP = getDataP(orig->_memoryEntries);
+
+ // Only accept persistent memory descriptors with valid dataP data.
+ assert(orig->_rangesCount == 1);
+ if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
+ return false;
+ }
+
+ _memRef = initData->fMemRef; // Grab the new named entry
+ options = orig->_flags & ~kIOMemoryAsReference;
+ type = options & kIOMemoryTypeMask;
+ buffers = orig->_ranges.v;
+ count = orig->_rangesCount;
+
+ // Now grab the original task and whatever mapper was previously used
+ task = orig->_task;
+ mapper = dataP->fMapper;
+
+ // We are ready to go through the original initialisation now
+ }
+
+ switch (type) {
+ case kIOMemoryTypeUIO:
+ case kIOMemoryTypeVirtual:
+#ifndef __LP64__
+ case kIOMemoryTypeVirtual64:
+#endif /* !__LP64__ */
+ assert(task);
+ if (!task) {
+ return false;
+ }
+ break;
+
+ case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
+#ifndef __LP64__
+ case kIOMemoryTypePhysical64:
+#endif /* !__LP64__ */
+ case kIOMemoryTypeUPL:
+ assert(!task);
+ break;
+ default:
+ return false; /* bad argument */
+ }
+
+ assert(buffers);
+ assert(count);
+
+ /*
+ * We can check the _initialized instance variable before having ever set
+ * it to an initial value because I/O Kit guarantees that all our instance
+ * variables are zeroed on an object's allocation.
+ */
+
+ if (_initialized) {
+ /*
+ * An existing memory descriptor is being retargeted to point to
+ * somewhere else. Clean up our present state.
+ */
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+ if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
+ while (_wireCount) {
+ complete();
+ }
+ }
+ if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
+ if (kIOMemoryTypeUIO == type) {
+ uio_free((uio_t) _ranges.v);
+ }
+#ifndef __LP64__
+ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
+ IODelete(_ranges.v64, IOAddressRange, _rangesCount);
+ }
+#endif /* !__LP64__ */
+ else {
+ IODelete(_ranges.v, IOVirtualRange, _rangesCount);
+ }
+ }
+
+ options |= (kIOMemoryRedirected & _flags);
+ if (!(kIOMemoryRedirected & options)) {
+ if (_memRef) {
+ memoryReferenceRelease(_memRef);
+ _memRef = NULL;
+ }
+ if (_mappings) {
+ _mappings->flushCollection();
+ }
+ }
+ } else {
+ if (!super::init()) {
+ return false;
+ }
+ _initialized = true;
+ }
+
+ // Grab the appropriate mapper
+ if (kIOMemoryHostOrRemote & options) {
+ options |= kIOMemoryMapperNone;
+ }
+ if (kIOMemoryMapperNone & options) {
+ mapper = NULL; // No Mapper
+ } else if (mapper == kIOMapperSystem) {
+ IOMapper::checkForSystemMapper();
+ gIOSystemMapper = mapper = IOMapper::gSystem;
+ }
+
+ // Remove the dynamic internal use flags from the initial setting
+ options &= ~(kIOMemoryPreparedReadOnly);
+ _flags = options;
+ _task = task;
+
+#ifndef __LP64__
+ _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
+#endif /* !__LP64__ */
+
+ _dmaReferences = 0;
+ __iomd_reservedA = 0;
+ __iomd_reservedB = 0;
+ _highestPage = 0;
+
+ if (kIOMemoryThreadSafe & options) {
+ if (!_prepareLock) {
+ _prepareLock = IOLockAlloc();
+ }
+ } else if (_prepareLock) {
+ IOLockFree(_prepareLock);
+ _prepareLock = NULL;
+ }
+
+ if (kIOMemoryTypeUPL == type) {
+ ioGMDData *dataP;
+ unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
+
+ if (!initMemoryEntries(dataSize, mapper)) {
+ return false;
+ }
+ dataP = getDataP(_memoryEntries);
+ dataP->fPageCnt = 0;
+ switch (kIOMemoryDirectionMask & options) {
+ case kIODirectionOut:
+ dataP->fDMAAccess = kIODMAMapReadAccess;
+ break;
+ case kIODirectionIn:
+ dataP->fDMAAccess = kIODMAMapWriteAccess;
+ break;
+ case kIODirectionNone:
+ case kIODirectionOutIn:
+ default:
+ panic("bad dir for upl 0x%x\n", (int) options);
+ break;
+ }
+ // _wireCount++; // UPLs start out life wired
+
+ _length = count;
+ _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
+
+ ioPLBlock iopl;
+ iopl.fIOPL = (upl_t) buffers;
+ upl_set_referenced(iopl.fIOPL, true);
+ upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
+
+ if (upl_get_size(iopl.fIOPL) < (count + offset)) {
+ panic("short external upl");
+ }
+
+ _highestPage = upl_get_highest_page(iopl.fIOPL);
+ DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
+
+ // kIOPLOnDevice is conveniently equal to 1, so pageList->device sets the flag directly
+ iopl.fFlags = pageList->device | kIOPLExternUPL;
+ if (!pageList->device) {
+ // Pre-compute the offset into the UPL's page list
+ pageList = &pageList[atop_32(offset)];
+ offset &= PAGE_MASK;
+ }
+ iopl.fIOMDOffset = 0;
+ iopl.fMappedPage = 0;
+ iopl.fPageInfo = (vm_address_t) pageList;
+ iopl.fPageOffset = offset;
+ _memoryEntries->appendBytes(&iopl, sizeof(iopl));
+ } else {
+ // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
+ // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
+
+ // Initialize the memory descriptor
+ if (options & kIOMemoryAsReference) {
+#ifndef __LP64__
+ _rangesIsAllocated = false;
+#endif /* !__LP64__ */
+
+ // Hack assignment to get the buffer arg into _ranges.
+ // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
+ // work, C++ sigh.
+ // This also initialises the uio & physical ranges.
+ _ranges.v = (IOVirtualRange *) buffers;
+ } else {
+#ifndef __LP64__
+ _rangesIsAllocated = true;
+#endif /* !__LP64__ */
+ switch (type) {
+ case kIOMemoryTypeUIO:
+ _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
+ break;
+
+#ifndef __LP64__
+ case kIOMemoryTypeVirtual64:
+ case kIOMemoryTypePhysical64:
+ if (count == 1
+#ifndef __arm__
+ && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
+#endif
+ ) {
+ if (kIOMemoryTypeVirtual64 == type) {
+ type = kIOMemoryTypeVirtual;
+ } else {
+ type = kIOMemoryTypePhysical;
+ }
+ _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
+ _rangesIsAllocated = false;
+ _ranges.v = &_singleRange.v;
+ _singleRange.v.address = ((IOAddressRange *) buffers)->address;
+ _singleRange.v.length = ((IOAddressRange *) buffers)->length;
+ break;
+ }
+ _ranges.v64 = IONew(IOAddressRange, count);
+ if (!_ranges.v64) {
+ return false;
+ }
+ bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
+ break;
+#endif /* !__LP64__ */
+ case kIOMemoryTypeVirtual:
+ case kIOMemoryTypePhysical:
+ if (count == 1) {
+ _flags |= kIOMemoryAsReference;
+#ifndef __LP64__
+ _rangesIsAllocated = false;
+#endif /* !__LP64__ */
+ _ranges.v = &_singleRange.v;
+ } else {
+ _ranges.v = IONew(IOVirtualRange, count);
+ if (!_ranges.v) {
+ return false;
+ }
+ }
+ bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
+ break;
+ }
+ }
+ _rangesCount = count;
+
+ // Find starting address within the vector of ranges
+ Ranges vec = _ranges;
+ mach_vm_size_t totalLength = 0;
+ unsigned int ind, pages = 0;
+ for (ind = 0; ind < count; ind++) {
+ mach_vm_address_t addr;
+ mach_vm_address_t endAddr;
+ mach_vm_size_t len;
+
+ // addr & len are returned by this function
+ getAddrLenForInd(addr, len, type, vec, ind);
+ if (_task) {
+ mach_vm_size_t phys_size;
+ kern_return_t kret;
+ kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
+ if (KERN_SUCCESS != kret) {
+ break;
+ }
+ if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
+ break;
+ }
+ } else {
+ if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
+ break;
+ }
+ if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
+ break;
+ }
+ if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
+ break;
+ }
+ }
+ if (os_add_overflow(totalLength, len, &totalLength)) {
+ break;
+ }
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
+ uint64_t highPage = atop_64(addr + len - 1);
+ if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
+ _highestPage = (ppnum_t) highPage;
+ DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
+ }
+ }
+ }
+ if ((ind < count)
+ || (totalLength != ((IOByteCount) totalLength))) {
+ return false; /* overflow */
+ }
+ _length = totalLength;
+ _pages = pages;
+
+ // Auto-prepare memory at creation time.
+ // Implied completion when the descriptor is freed
+
+
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
+ _wireCount++; // Physical MDs are, by definition, wired
+ } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
+ ioGMDData *dataP;
+ unsigned dataSize;
+
+ if (_pages > atop_64(max_mem)) {
+ return false;
+ }
+
+ dataSize = computeDataSize(_pages, /* upls */ count * 2);
+ if (!initMemoryEntries(dataSize, mapper)) {
+ return false;
+ }
+ dataP = getDataP(_memoryEntries);
+ dataP->fPageCnt = _pages;
+
+ if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
+ && (VM_KERN_MEMORY_NONE == _kernelTag)) {
+ _kernelTag = IOMemoryTag(kernel_map);
+ if (_kernelTag == gIOSurfaceTag) {
+ _userTag = VM_MEMORY_IOSURFACE;
+ }
+ }
+
+ if ((kIOMemoryPersistent & _flags) && !_memRef) {
+ IOReturn
+ err = memoryReferenceCreate(0, &_memRef);
+ if (kIOReturnSuccess != err) {
+ return false;
+ }
+ }
+
+ if ((_flags & kIOMemoryAutoPrepare)
+ && prepare() != kIOReturnSuccess) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+/*
+ * free
+ *
+ * Free resources.
+ */
+void
+IOGeneralMemoryDescriptor::free()
+{
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+
+ if (reserved && reserved->dp.memory) {
+ LOCK;
+ reserved->dp.memory = NULL;
+ UNLOCK;
+ }
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
+ ioGMDData * dataP;
+ if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
+ dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
+ dataP->fMappedBaseValid = dataP->fMappedBase = 0;
+ }
+ } else {
+ while (_wireCount) {
+ complete();
+ }
+ }
+
+ if (_memoryEntries) {
+ _memoryEntries.reset();
+ }
+
+ if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
+ if (kIOMemoryTypeUIO == type) {
+ uio_free((uio_t) _ranges.v);
+ }
+#ifndef __LP64__
+ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
+ IODelete(_ranges.v64, IOAddressRange, _rangesCount);
+ }
+#endif /* !__LP64__ */
+ else {
+ IODelete(_ranges.v, IOVirtualRange, _rangesCount);
+ }
+
+ _ranges.v = NULL;
+ }
+
+ if (reserved) {
+ cleanKernelReserved(reserved);
+ if (reserved->dp.devicePager) {
+ // memEntry holds a ref on the device pager which owns reserved
+ // (IOMemoryDescriptorReserved) so no reserved access after this point
+ device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
+ } else {
+ IODelete(reserved, IOMemoryDescriptorReserved, 1);
+ }
+ reserved = NULL;
+ }
+
+ if (_memRef) {
+ memoryReferenceRelease(_memRef);
+ }
+ if (_prepareLock) {
+ IOLockFree(_prepareLock);
+ }
+
+ super::free();
+}
+
+#ifndef __LP64__
+void
+IOGeneralMemoryDescriptor::unmapFromKernel()
+{
+ panic("IOGMD::unmapFromKernel deprecated");
+}
+
+void
+IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
+{
+ panic("IOGMD::mapIntoKernel deprecated");
+}
+#endif /* !__LP64__ */
+
+/*
+ * getDirection:
+ *
+ * Get the direction of the transfer.
+ */
+IODirection
+IOMemoryDescriptor::getDirection() const
+{
+#ifndef __LP64__
+ if (_direction) {
+ return _direction;
+ }
+#endif /* !__LP64__ */
+ return (IODirection) (_flags & kIOMemoryDirectionMask);
+}
+
+/*
+ * getLength:
+ *
+ * Get the length of the transfer (over all ranges).
+ */
+IOByteCount
+IOMemoryDescriptor::getLength() const
+{
+ return _length;
+}
+
+void
+IOMemoryDescriptor::setTag( IOOptionBits tag )
+{
+ _tag = tag;
+}
+
+IOOptionBits
+IOMemoryDescriptor::getTag( void )
+{
+ return _tag;
+}
+
+uint64_t
+IOMemoryDescriptor::getFlags(void)
+{
+ return _flags;
+}
+
+#ifndef __LP64__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+
+ // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
+IOPhysicalAddress
+IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
+{
+ addr64_t physAddr = 0;
+
+ if (prepare() == kIOReturnSuccess) {
+ physAddr = getPhysicalSegment64( offset, length );
+ complete();
+ }
+
+ return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
+}
+
+#pragma clang diagnostic pop
+
+#endif /* !__LP64__ */
+
+IOByteCount
+IOMemoryDescriptor::readBytes
+(IOByteCount offset, void *bytes, IOByteCount length)
+{
+ addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
+ IOByteCount endoffset;
+ IOByteCount remaining;
+
+
+ // Check that this entire I/O is within the available range
+ if ((offset > _length)
+ || os_add_overflow(length, offset, &endoffset)
+ || (endoffset > _length)) {
+ assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
+ return 0;
+ }
+ if (offset >= _length) {
+ return 0;
+ }
+
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return 0;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }
+
+ remaining = length = min(length, _length - offset);
+ while (remaining) { // (process another target segment?)
+ addr64_t srcAddr64;
+ IOByteCount srcLen;
+
+ srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
+ if (!srcAddr64) {
+ break;
+ }
+
+ // Clip segment length to remaining
+ if (srcLen > remaining) {
+ srcLen = remaining;
+ }
+
+ if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
+ srcLen = (UINT_MAX - PAGE_SIZE + 1);
+ }
+ copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
+ cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
+
+ dstAddr += srcLen;
+ offset += srcLen;
+ remaining -= srcLen;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }
+
+ assert(!remaining);
+
+ return length - remaining;
+}
+
+IOByteCount
+IOMemoryDescriptor::writeBytes
+(IOByteCount inoffset, const void *bytes, IOByteCount length)
+{
+ addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
+ IOByteCount remaining;
+ IOByteCount endoffset;
+ IOByteCount offset = inoffset;
+
+ assert( !(kIOMemoryPreparedReadOnly & _flags));
+
+ // Check that this entire I/O is within the available range
+ if ((offset > _length)
+ || os_add_overflow(length, offset, &endoffset)
+ || (endoffset > _length)) {
+ assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
+ return 0;
+ }
+ if (kIOMemoryPreparedReadOnly & _flags) {
+ return 0;
+ }
+ if (offset >= _length) {
+ return 0;
+ }
+
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return 0;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }
+
+ remaining = length = min(length, _length - offset);
+ while (remaining) { // (process another target segment?)
+ addr64_t dstAddr64;
+ IOByteCount dstLen;
+
+ dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
+ if (!dstAddr64) {
+ break;
+ }
+
+ // Clip segment length to remaining
+ if (dstLen > remaining) {
+ dstLen = remaining;
+ }
+
+ if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
+ dstLen = (UINT_MAX - PAGE_SIZE + 1);
+ }
+ if (!srcAddr) {
+ bzero_phys(dstAddr64, (unsigned int) dstLen);
+ } else {
+ copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
+ cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
+ srcAddr += dstLen;
+ }
+ offset += dstLen;
+ remaining -= dstLen;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }
+
+ assert(!remaining);
+
+#if defined(__x86_64__)
+ // copypv does not cppvFsnk on intel
+#else
+ if (!srcAddr) {
+ performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
+ }
+#endif
+
+ return length - remaining;
+}
+
+#ifndef __LP64__
+void
+IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
+{
+ panic("IOGMD::setPosition deprecated");
+}
+#endif /* !__LP64__ */
+
+static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
+static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
+
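+/*
+ * getPreparationID:
+ *
+ * Return the ID for the current prepare() of this descriptor, assigning a
+ * fresh one from the global counter on first use. Physical descriptors defer
+ * to the IOMemoryDescriptor implementation; unprepared descriptors report
+ * kIOPreparationIDUnprepared.
+ */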
+uint64_t
+IOGeneralMemoryDescriptor::getPreparationID( void )
+{
+ ioGMDData *dataP;
+
+ if (!_wireCount) {
+ return kIOPreparationIDUnprepared;
+ }
+
+ if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
+ || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
+ IOMemoryDescriptor::setPreparationID();
+ return IOMemoryDescriptor::getPreparationID();
+ }
+
+ if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
+ return kIOPreparationIDUnprepared;
+ }
+
+ if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
+ SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
+ OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
+ }
+ return dataP->fPreparationID;
+}
+
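+/*
+ * cleanKernelReserved:
+ *
+ * Drop the task reference held in reserved->creator, if any.
+ */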
+void
+IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
+{
+ if (reserved->creator) {
+ task_deallocate(reserved->creator);
+ reserved->creator = NULL;
+ }
+}
+
+IOMemoryDescriptorReserved *
+IOMemoryDescriptor::getKernelReserved( void )
+{
+ if (!reserved) {
+ reserved = IONewZero(IOMemoryDescriptorReserved, 1);
+ }
+ return reserved;
+}
+
+void
+IOMemoryDescriptor::setPreparationID( void )
+{
+ if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
+ SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
+ OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
+ }
+}
+
+uint64_t
+IOMemoryDescriptor::getPreparationID( void )
+{
+ if (reserved) {
+ return reserved->preparationID;
+ } else {
+ return kIOPreparationIDUnsupported;
+ }
+}
+
+void
+IOMemoryDescriptor::setDescriptorID( void )
+{
+ if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
+ SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
+ OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
+ }
+}
+
+uint64_t
+IOMemoryDescriptor::getDescriptorID( void )
+{
+ setDescriptorID();
+
+ if (reserved) {
+ return reserved->descriptorID;
+ } else {
+ return kIODescriptorIDInvalid;
+ }
+}
+
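+/*
+ * ktraceEmitPhysicalSegments:
+ *
+ * When the IOMDPA_MAPPED debugid is enabled, walk this descriptor's physical
+ * segments and emit them as packed kdebug events: page-sized, page-aligned
+ * segments are batched into IOMDPA_SEGMENTS_PAGE events and variable-length
+ * segments into IOMDPA_SEGMENTS_LONG events.
+ */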
+IOReturn
+IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
+{
+ if (!kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
+ return kIOReturnSuccess;
+ }
+
+ assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
+ if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
+ return kIOReturnBadArgument;
+ }
+
+ uint64_t descriptorID = getDescriptorID();
+ assert(descriptorID != kIODescriptorIDInvalid);
+ if (getDescriptorID() == kIODescriptorIDInvalid) {
+ return kIOReturnBadArgument;
+ }
+
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
+
+#if __LP64__
+ static const uint8_t num_segments_page = 8;
+#else
+ static const uint8_t num_segments_page = 4;
+#endif
+ static const uint8_t num_segments_long = 2;
+
+ IOPhysicalAddress segments_page[num_segments_page];
+ IOPhysicalRange segments_long[num_segments_long];
+ memset(segments_page, UINT32_MAX, sizeof(segments_page));
+ memset(segments_long, 0, sizeof(segments_long));
+
+ uint8_t segment_page_idx = 0;
+ uint8_t segment_long_idx = 0;
+
+ IOPhysicalRange physical_segment;
+ for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
+ physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
+
+ if (physical_segment.length == 0) {
+ break;
+ }
+
+ /**
+ * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages. To optimize for trace
+ * buffer memory, pack segment events according to the following.
+ *
+ * Mappings must be emitted in ascending order starting from offset 0. Mappings can be associated with the previous
+ * IOMDPA_MAPPED event emitted by the current thread_id.
+ *
+ * IOMDPA_SEGMENTS_PAGE = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
+ * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
+ * - unmapped pages will have a ppn of MAX_INT_32
+ * IOMDPA_SEGMENTS_LONG = up to 2 virtually contiguous mappings of variable length
+ * - address_0, length_0, address_1, length_1
+ * - unmapped pages will have an address of 0
+ *
+ * During each iteration do the following depending on the length of the mapping:
+ * 1. add the current segment to the appropriate queue of pending segments
+ * 2. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
+ * 2a. if FALSE, emit and reset all events in the previous queue
+ * 3. check if we have filled up the current queue of pending events
+ * 3a. if TRUE, emit and reset all events in the pending queue
+ * 4. after completing all iterations, emit events in the current queue
+ */
+
+ bool emit_page = false;
+ bool emit_long = false;
+ if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
+ segments_page[segment_page_idx] = physical_segment.address;
+ segment_page_idx++;
+
+ emit_long = segment_long_idx != 0;
+ emit_page = segment_page_idx == num_segments_page;
+
+ if (os_unlikely(emit_long)) {
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
+ segments_long[0].address, segments_long[0].length,
+ segments_long[1].address, segments_long[1].length);
+ }
+
+ if (os_unlikely(emit_page)) {
+#if __LP64__
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
+ ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
+ ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
+ ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
+ ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
+#else
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
+ (ppnum_t) atop_32(segments_page[0]),
+ (ppnum_t) atop_32(segments_page[1]),
+ (ppnum_t) atop_32(segments_page[2]),
+ (ppnum_t) atop_32(segments_page[3]));
+#endif
+ }
+ } else {
+ segments_long[segment_long_idx] = physical_segment;
+ segment_long_idx++;
+
+ emit_page = segment_page_idx != 0;
+ emit_long = segment_long_idx == num_segments_long;
+
+ if (os_unlikely(emit_page)) {
+#if __LP64__
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
+ ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
+ ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
+ ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
+ ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
+#else
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
+ (ppnum_t) atop_32(segments_page[0]),
+ (ppnum_t) atop_32(segments_page[1]),
+ (ppnum_t) atop_32(segments_page[2]),
+ (ppnum_t) atop_32(segments_page[3]));
+#endif
+ }
+
+ if (emit_long) {
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
+ segments_long[0].address, segments_long[0].length,
+ segments_long[1].address, segments_long[1].length);
+ }
+ }
+
+ if (os_unlikely(emit_page)) {
+ memset(segments_page, UINT32_MAX, sizeof(segments_page));
+ segment_page_idx = 0;
+ }
+
+ if (os_unlikely(emit_long)) {
+ memset(segments_long, 0, sizeof(segments_long));
+ segment_long_idx = 0;
+ }
+ }
+
+ if (segment_page_idx != 0) {
+ assert(segment_long_idx == 0);
+#if __LP64__
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
+ ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
+ ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
+ ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
+ ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
+#else
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
+ (ppnum_t) atop_32(segments_page[0]),
+ (ppnum_t) atop_32(segments_page[1]),
+ (ppnum_t) atop_32(segments_page[2]),
+ (ppnum_t) atop_32(segments_page[3]));
+#endif
+ } else if (segment_long_idx != 0) {
+ assert(segment_page_idx == 0);
+ IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
+ segments_long[0].address, segments_long[0].length,
+ segments_long[1].address, segments_long[1].length);
+ }
+
+ return kIOReturnSuccess;
+}
+
+void
+IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
+{
+ _kernelTag = (vm_tag_t) kernelTag;
+ _userTag = (vm_tag_t) userTag;
+}
+
+uint32_t
+IOMemoryDescriptor::getVMTag(vm_map_t map)
+{
+ if (vm_kernel_map_is_kernel(map)) {
+ if (VM_KERN_MEMORY_NONE != _kernelTag) {
+ return (uint32_t) _kernelTag;
+ }
+ } else {
+ if (VM_KERN_MEMORY_NONE != _userTag) {
+ return (uint32_t) _userTag;
+ }
+ }
+ return IOMemoryTag(map);
+}
+
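+/*
+ * dmaCommandOperation:
+ *
+ * Dispatch IODMACommand operations against this descriptor's internal state:
+ * DMA map/unmap, map specification updates, characteristics queries, active
+ * reference counting, and physical segment walking.
+ */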
+IOReturn
+IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
+{
+ IOReturn err = kIOReturnSuccess;
+ DMACommandOps params;
+ IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
+ ioGMDData *dataP;
+
+ params = (op & ~kIOMDDMACommandOperationMask & op);
+ op &= kIOMDDMACommandOperationMask;
+
+ if (kIOMDDMAMap == op) {
+ if (dataSize < sizeof(IOMDDMAMapArgs)) {
+ return kIOReturnUnderrun;
+ }
+
+ IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
+
+ if (!_memoryEntries
+ && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
+ return kIOReturnNoMemory;
+ }
+
+ if (_memoryEntries && data->fMapper) {
+ bool remap, keepMap;
+ dataP = getDataP(_memoryEntries);
+
+ if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
+ dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
+ }
+ if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
+ dataP->fDMAMapAlignment = data->fMapSpec.alignment;
+ }
+
+ keepMap = (data->fMapper == gIOSystemMapper);
+ keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
+
+ if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
+ IOLockLock(_prepareLock);
+ }
+
+ remap = (!keepMap);
+ remap |= (dataP->fDMAMapNumAddressBits < 64)
+ && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
+ remap |= (dataP->fDMAMapAlignment > page_size);
+
+ if (remap || !dataP->fMappedBaseValid) {
+ err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
+ if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
+ dataP->fMappedBase = data->fAlloc;
+ dataP->fMappedBaseValid = true;
+ dataP->fMappedLength = data->fAllocLength;
+ data->fAllocLength = 0; // IOMD owns the alloc now
+ }
+ } else {
+ data->fAlloc = dataP->fMappedBase;
+ data->fAllocLength = 0; // give out IOMD map
+ md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
+ }
+
+ if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
+ IOLockUnlock(_prepareLock);
+ }
+ }
+ return err;
+ }
+ if (kIOMDDMAUnmap == op) {
+ if (dataSize < sizeof(IOMDDMAMapArgs)) {
+ return kIOReturnUnderrun;
+ }
+ IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
+
+ err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
+
+ return kIOReturnSuccess;
+ }
+
+ if (kIOMDAddDMAMapSpec == op) {
+ if (dataSize < sizeof(IODMAMapSpecification)) {
+ return kIOReturnUnderrun;
+ }
+
+ IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
+
+ if (!_memoryEntries
+ && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
+ return kIOReturnNoMemory;
+ }
+
+ if (_memoryEntries) {
+ dataP = getDataP(_memoryEntries);
+ if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
+ dataP->fDMAMapNumAddressBits = data->numAddressBits;
+ }
+ if (data->alignment > dataP->fDMAMapAlignment) {
+ dataP->fDMAMapAlignment = data->alignment;
+ }
+ }
+ return kIOReturnSuccess;
+ }
+
+ if (kIOMDGetCharacteristics == op) {
+ if (dataSize < sizeof(IOMDDMACharacteristics)) {
+ return kIOReturnUnderrun;
+ }
+
+ IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
+ data->fLength = _length;
+ data->fSGCount = _rangesCount;
+ data->fPages = _pages;
+ data->fDirection = getDirection();
+ if (!_wireCount) {
+ data->fIsPrepared = false;
+ } else {
+ data->fIsPrepared = true;
+ data->fHighestPage = _highestPage;
+ if (_memoryEntries) {
+ dataP = getDataP(_memoryEntries);
+ ioPLBlock *ioplList = getIOPLList(dataP);
+ UInt count = getNumIOPL(_memoryEntries, dataP);
+ if (count == 1) {
+ data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
+ }
+ }
+ }
+
+ return kIOReturnSuccess;
+ } else if (kIOMDDMAActive == op) {
+ if (params) {
+ int16_t prior;
+ prior = OSAddAtomic16(1, &md->_dmaReferences);
+ if (!prior) {
+ md->_mapName = NULL;
+ }
+ } else {
+ if (md->_dmaReferences) {
+ OSAddAtomic16(-1, &md->_dmaReferences);
+ } else {
+ panic("_dmaReferences underflow");
+ }
+ }
+ } else if (kIOMDWalkSegments != op) {
+ return kIOReturnBadArgument;
+ }
+
+ // Get the next segment
+ struct InternalState {
+ IOMDDMAWalkSegmentArgs fIO;
+ mach_vm_size_t fOffset2Index;
+ mach_vm_size_t fNextOffset;
+ UInt fIndex;
+ } *isP;
+
+ // Find the next segment
+ if (dataSize < sizeof(*isP)) {
+ return kIOReturnUnderrun;
+ }
+
+ isP = (InternalState *) vData;
+ uint64_t offset = isP->fIO.fOffset;
+ uint8_t mapped = isP->fIO.fMapped;
+ uint64_t mappedBase;
+
+ if (mapped && (kIOMemoryRemote & _flags)) {
+ return kIOReturnNotAttached;
+ }
+
+ if (IOMapper::gSystem && mapped
+ && (!(kIOMemoryHostOnly & _flags))
+ && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
+// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
+ if (!_memoryEntries
+ && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
+ return kIOReturnNoMemory;
+ }
+
+ dataP = getDataP(_memoryEntries);
+ if (dataP->fMapper) {
+ IODMAMapSpecification mapSpec;
+ bzero(&mapSpec, sizeof(mapSpec));
+ mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
+ mapSpec.alignment = dataP->fDMAMapAlignment;
+ err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
+ if (kIOReturnSuccess != err) {
+ return err;
+ }
+ dataP->fMappedBaseValid = true;
+ }
+ }
+
+ if (mapped) {
+ if (IOMapper::gSystem
+ && (!(kIOMemoryHostOnly & _flags))
+ && _memoryEntries
+ && (dataP = getDataP(_memoryEntries))
+ && dataP->fMappedBaseValid) {
+ mappedBase = dataP->fMappedBase;
+ } else {
+ mapped = 0;
+ }
+ }
+
+ if (offset >= _length) {
+ return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
+ }
+
+ // Validate the previous offset
+ UInt ind;
+ mach_vm_size_t off2Ind = isP->fOffset2Index;
+ if (!params
+ && offset
+ && (offset == isP->fNextOffset || off2Ind <= offset)) {
+ ind = isP->fIndex;
+ } else {
+ ind = off2Ind = 0; // Start from beginning
+ }
+ mach_vm_size_t length;
+ UInt64 address;
+
+ if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
+ // Physical address based memory descriptor
+ const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
+
+ // Find the range after the one that contains the offset
+ mach_vm_size_t len;
+ for (len = 0; off2Ind <= offset; ind++) {
+ len = physP[ind].length;
+ off2Ind += len;
+ }
+
+ // Calculate length within range and starting address
+ length = off2Ind - offset;
+ address = physP[ind - 1].address + len - length;
+
+ if (true && mapped) {
+ address = mappedBase + offset;
+ } else {
+ // see how far we can coalesce ranges
+ while (ind < _rangesCount && address + length == physP[ind].address) {
+ len = physP[ind].length;
+ length += len;
+ off2Ind += len;
+ ind++;
+ }
+ }
+
+ // correct contiguous check overshoot
+ ind--;
+ off2Ind -= len;
+ }
+#ifndef __LP64__
+ else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
+ // Physical address based memory descriptor
+ const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
+
+ // Find the range after the one that contains the offset
+ mach_vm_size_t len;
+ for (len = 0; off2Ind <= offset; ind++) {
+ len = physP[ind].length;
+ off2Ind += len;
+ }
+
+ // Calculate length within range and starting address
+ length = off2Ind - offset;
+ address = physP[ind - 1].address + len - length;
+
+ if (true && mapped) {
+ address = mappedBase + offset;
+ } else {
+ // see how far we can coalesce ranges
+ while (ind < _rangesCount && address + length == physP[ind].address) {
+ len = physP[ind].length;
+ length += len;
+ off2Ind += len;
+ ind++;
+ }
+ }
+ // correct contiguous check overshoot
+ ind--;
+ off2Ind -= len;
+ }
+#endif /* !__LP64__ */
+ else {
+ do {
+ if (!_wireCount) {
+ panic("IOGMD: not wired for the IODMACommand");
+ }
+
+ assert(_memoryEntries);
+
+ dataP = getDataP(_memoryEntries);
+ const ioPLBlock *ioplList = getIOPLList(dataP);
+ UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
+ upl_page_info_t *pageList = getPageList(dataP);
+
+ assert(numIOPLs > 0);
+
+ // Scan through iopl info blocks looking for block containing offset
+ while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
+ ind++;
+ }
+
+ // Go back to actual range as search goes past it
+ ioPLBlock ioplInfo = ioplList[ind - 1];
+ off2Ind = ioplInfo.fIOMDOffset;
+
+ if (ind < numIOPLs) {
+ length = ioplList[ind].fIOMDOffset;
+ } else {
+ length = _length;
+ }
+ length -= offset; // Remainder within iopl
+
+ // Subtract offset till this iopl in total list
+ offset -= off2Ind;
+
+ // If a mapped address is requested and this is a pre-mapped IOPL
+ // then we just need to compute an offset relative to the mapped base.
+ if (mapped) {
+ offset += (ioplInfo.fPageOffset & PAGE_MASK);
+ address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
+ continue; // Done; leave the do/while(false) now
+ }
+
+ // The offset is rebased into the current iopl.
+ // Now add the iopl 1st page offset.
+ offset += ioplInfo.fPageOffset;
+
+ // For external UPLs the fPageInfo field points directly to
+ // the upl's upl_page_info_t array.
+ if (ioplInfo.fFlags & kIOPLExternUPL) {
+ pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
+ } else {
+ pageList = &pageList[ioplInfo.fPageInfo];
+ }
+
+ // Check for direct device non-paged memory
+ if (ioplInfo.fFlags & kIOPLOnDevice) {
+ address = ptoa_64(pageList->phys_addr) + offset;
+ continue; // Done; leave the do/while(false) now
+ }
+
+ // Now we need to compute the index into the pageList
+ UInt pageInd = atop_32(offset);
+ offset &= PAGE_MASK;
+
+ // Compute the starting address of this segment
+ IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
+ if (!pageAddr) {
+ panic("!pageList phys_addr");
+ }
+
+ address = ptoa_64(pageAddr) + offset;
+
+ // length is currently set to the length of the remainder of the iopl.
+ // We need to check that the remainder of the iopl is contiguous.
+ // This is indicated by pageList[ind].phys_addr being sequential.
+ IOByteCount contigLength = PAGE_SIZE - offset;
+ while (contigLength < length
+ && ++pageAddr == pageList[++pageInd].phys_addr) {
+ contigLength += PAGE_SIZE;
+ }
+
+ if (contigLength < length) {
+ length = contigLength;
+ }
+
+
+ assert(address);
+ assert(length);
+ } while (false);
+ }
+
+ // Update return values and state
+ isP->fIO.fIOVMAddr = address;
+ isP->fIO.fLength = length;
+ isP->fIndex = ind;
+ isP->fOffset2Index = off2Ind;
+ isP->fNextOffset = isP->fIO.fOffset + length;
+
+ return kIOReturnSuccess;
+}
+
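+/*
+ * getPhysicalSegment:
+ *
+ * Return the address of the segment containing the given offset and the
+ * length of the contiguous run starting there, either from the raw source
+ * ranges (_kIOMemorySourceSegment) or via the segment walker in
+ * dmaCommandOperation(), with optional system mapper translation.
+ */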
+addr64_t
+IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
+{
+ IOReturn ret;
+ mach_vm_address_t address = 0;
+ mach_vm_size_t length = 0;
+ IOMapper * mapper = gIOSystemMapper;
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+
+ if (lengthOfSegment) {
+ *lengthOfSegment = 0;
+ }
+
+ if (offset >= _length) {
+ return 0;
+ }
+
+ // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
+ // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
+ // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
+ // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
+
+ if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
+ unsigned rangesIndex = 0;
+ Ranges vec = _ranges;
+ mach_vm_address_t addr;
+
+ // Find starting address within the vector of ranges
+ for (;;) {
+ getAddrLenForInd(addr, length, type, vec, rangesIndex);
+ if (offset < length) {
+ break;
+ }
+ offset -= length; // (make offset relative)
+ rangesIndex++;
+ }
+
+ // Now that we have the starting range,
+ // lets find the last contiguous range
+ addr += offset;
+ length -= offset;
+
+ for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
+ mach_vm_address_t newAddr;
+ mach_vm_size_t newLen;
+
+ getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
+ if (addr + length != newAddr) {
+ break;
+ }
+ length += newLen;
+ }
+ if (addr) {
+ address = (IOPhysicalAddress) addr; // Truncate address to 32bit
+ }
+ } else {
+ IOMDDMAWalkSegmentState _state;
+ IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
+
+ state->fOffset = offset;
+ state->fLength = _length - offset;
+ state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
+
+ ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
+
+ if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
+ DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
+ ret, this, state->fOffset,
+ state->fIOVMAddr, state->fLength);
+ }
+ if (kIOReturnSuccess == ret) {
+ address = state->fIOVMAddr;
+ length = state->fLength;
+ }
+
+ // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
+ // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
+
+ if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
+ if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
+ addr64_t origAddr = address;
+ IOByteCount origLen = length;
+
+ address = mapper->mapToPhysicalAddress(origAddr);
+ length = page_size - (address & (page_size - 1));
+ while ((length < origLen)
+ && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
+ length += page_size;
+ }
+ if (length > origLen) {
+ length = origLen;
+ }
+ }
+ }
+ }
+
+ if (!address) {
+ length = 0;
+ }
+
+ if (lengthOfSegment) {
+ *lengthOfSegment = length;
+ }
+
+ return address;
+}
+
+#ifndef __LP64__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+
+addr64_t
+IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
+{
+ addr64_t address = 0;
+
+ if (options & _kIOMemorySourceSegment) {
+ address = getSourceSegment(offset, lengthOfSegment);
+ } else if (options & kIOMemoryMapperNone) {
+ address = getPhysicalSegment64(offset, lengthOfSegment);
+ } else {
+ address = getPhysicalSegment(offset, lengthOfSegment);
+ }
+
+ return address;
+}
+#pragma clang diagnostic pop
+
+addr64_t
+IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
+}
+
+IOPhysicalAddress
+IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ addr64_t address = 0;
+ IOByteCount length = 0;
+
+ address = getPhysicalSegment(offset, lengthOfSegment, 0);
+
+ if (lengthOfSegment) {
+ length = *lengthOfSegment;
+ }
+
+ if ((address + length) > 0x100000000ULL) {
+ panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
+ address, (long) length, (getMetaClass())->getClassName());
+ }
+
+ return (IOPhysicalAddress) address;
+}
+
+addr64_t
+IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ IOPhysicalAddress phys32;
+ IOByteCount length;
+ addr64_t phys64;
+ IOMapper * mapper = NULL;
+
+ phys32 = getPhysicalSegment(offset, lengthOfSegment);
+ if (!phys32) {
+ return 0;
+ }
+
+ if (gIOSystemMapper) {
+ mapper = gIOSystemMapper;
+ }
+
+ if (mapper) {
+ IOByteCount origLen;
+
+ phys64 = mapper->mapToPhysicalAddress(phys32);
+ origLen = *lengthOfSegment;
+ length = page_size - (phys64 & (page_size - 1));
+ while ((length < origLen)
+ && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
+ length += page_size;
+ }
+ if (length > origLen) {
+ length = origLen;
+ }
+
+ *lengthOfSegment = length;
+ } else {
+ phys64 = (addr64_t) phys32;
+ }
+
+ return phys64;
+}
+
+IOPhysicalAddress
+IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
+}
+
+IOPhysicalAddress
+IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
+}
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+
+void *
+IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
+ IOByteCount * lengthOfSegment)
+{
+ if (_task == kernel_task) {
+ return (void *) getSourceSegment(offset, lengthOfSegment);
+ } else {
+ panic("IOGMD::getVirtualSegment deprecated");
+ }
+
+ return NULL;
+}
+#pragma clang diagnostic pop
+#endif /* !__LP64__ */
+
+IOReturn
+IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
+{
+ IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
+ DMACommandOps params;
+ IOReturn err;
+
+ params = (op & ~kIOMDDMACommandOperationMask & op);
+ op &= kIOMDDMACommandOperationMask;
+
+ if (kIOMDGetCharacteristics == op) {
+ if (dataSize < sizeof(IOMDDMACharacteristics)) {
+ return kIOReturnUnderrun;
+ }
+
+ IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
+ data->fLength = getLength();
+ data->fSGCount = 0;
+ data->fDirection = getDirection();
+ data->fIsPrepared = true; // Assume prepared - fails safe
+ } else if (kIOMDWalkSegments == op) {
+ if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
+ return kIOReturnUnderrun;
+ }
+
+ IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
+ IOByteCount offset = (IOByteCount) data->fOffset;
+ IOPhysicalLength length, nextLength;
+ addr64_t addr, nextAddr;
+
+ if (data->fMapped) {
+ panic("fMapped %p %s %qx\n", this, getMetaClass()->getClassName(), (uint64_t) getLength());
+ }
+ addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
+ offset += length;
+ while (offset < getLength()) {
+ nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
+ if ((addr + length) != nextAddr) {
+ break;
+ }
+ length += nextLength;
+ offset += nextLength;
+ }
+ data->fIOVMAddr = addr;
+ data->fLength = length;
+ } else if (kIOMDAddDMAMapSpec == op) {
+ return kIOReturnUnsupported;
+ } else if (kIOMDDMAMap == op) {
+ if (dataSize < sizeof(IOMDDMAMapArgs)) {
+ return kIOReturnUnderrun;
+ }
+ IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
+
+ err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
+
+ return err;
+ } else if (kIOMDDMAUnmap == op) {
+ if (dataSize < sizeof(IOMDDMAMapArgs)) {
+ return kIOReturnUnderrun;
+ }
+ IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
+
+ err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
+
+ // Note: the dmaUnmap() result in err is not propagated; this operation always reports success.
+ return kIOReturnSuccess;
+ } else {
+ return kIOReturnBadArgument;
+ }
+
+ return kIOReturnSuccess;
+}
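+
+/*
+ * Usage sketch (illustrative only): how kIOMDWalkSegments is driven through
+ * the operation interface above. The argument fields (fOffset, fMapped,
+ * fIOVMAddr, fLength) are the ones used by the implementation; the caller
+ * shown here is hypothetical.
+ *
+ *   IOMDDMAWalkSegmentState  walkState;
+ *   IOMDDMAWalkSegmentArgs * args = (IOMDDMAWalkSegmentArgs *) (void *) &walkState;
+ *   args->fOffset = 0;
+ *   args->fMapped = false;   // request physical (unmapped) addresses
+ *   IOReturn rtn = md->dmaCommandOperation(kIOMDWalkSegments, &walkState, sizeof(walkState));
+ *   if (kIOReturnSuccess == rtn) {
+ *       // args->fIOVMAddr / args->fLength now describe one coalesced physical segment
+ *   }
+ */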
+
+IOReturn
+IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
+ IOOptionBits * oldState )
+{
+ IOReturn err = kIOReturnSuccess;
+
+ vm_purgable_t control;
+ int state;
+
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return kIOReturnNotAttached;
+ }
+
+ if (_memRef) {
+ err = super::setPurgeable(newState, oldState);
+ } else {
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }
+ do {
+ // Find the appropriate vm_map for the given task
+ vm_map_t curMap;
+ if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
+ err = kIOReturnNotReady;
+ break;
+ } else if (!_task) {
+ err = kIOReturnUnsupported;
+ break;
+ } else {
+ curMap = get_task_map(_task);
+ if (NULL == curMap) {
+ err = KERN_INVALID_ARGUMENT;
+ break;
+ }
+ }
+
+ // Can only do one range; use the first one
+ Ranges vec = _ranges;
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+ mach_vm_address_t addr;
+ mach_vm_size_t len;
+ getAddrLenForInd(addr, len, type, vec, 0);
+
+ err = purgeableControlBits(newState, &control, &state);
+ if (kIOReturnSuccess != err) {
+ break;
+ }
+ err = vm_map_purgable_control(curMap, addr, control, &state);
+ if (oldState) {
+ if (kIOReturnSuccess == err) {
+ err = purgeableStateBits(&state);
+ *oldState = state;
+ }
+ }
+ } while (false);
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }
+ }
+
+ return err;
+}
+
+IOReturn
+IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
+ IOOptionBits * oldState )
+{
+ IOReturn err = kIOReturnNotReady;
+
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }
+ if (_memRef) {
+ err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
+ }
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }
+
+ return err;
+}
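+
+/*
+ * Usage sketch (illustrative only): toggling the purgeable state of a
+ * descriptor backed by purgeable memory. The kIOMemoryPurgeable* constants
+ * are the standard states; the descriptor itself is assumed to have been
+ * created elsewhere.
+ *
+ *   IOOptionBits oldState;
+ *   md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);     // allow reclaim
+ *   ...
+ *   md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);  // pin contents again
+ *   // oldState == kIOMemoryPurgeableEmpty here means the contents were reclaimed
+ */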
+
+IOReturn
+IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
+ int newLedgerTag,
+ IOOptionBits newLedgerOptions )
+{
+ IOReturn err = kIOReturnSuccess;
+
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return kIOReturnNotAttached;
+ }
+
+ if (iokit_iomd_setownership_enabled == FALSE) {
+ return kIOReturnUnsupported;
+ }
+
+ if (_memRef) {
+ err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
+ } else {
+ err = kIOReturnUnsupported;
+ }
+
+ return err;
+}
+
+IOReturn
+IOMemoryDescriptor::setOwnership( task_t newOwner,
+ int newLedgerTag,
+ IOOptionBits newLedgerOptions )
+{
+ IOReturn err = kIOReturnNotReady;
+
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return kIOReturnNotAttached;
+ }
+
+ if (iokit_iomd_setownership_enabled == FALSE) {
+ return kIOReturnUnsupported;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }
+ if (_memRef) {
+ err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
+ } else {
+ IOMultiMemoryDescriptor * mmd;
+ IOSubMemoryDescriptor * smd;
+ if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
+ err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
+ } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
+ err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
+ }
+ }
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }
+
+ return err;
+}
+
+
+uint64_t
+IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
+{
+ uint64_t length;
+
+ if (_memRef) {
+ length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
+ } else {
+ IOByteCount iterate, segLen;
+ IOPhysicalAddress sourceAddr, sourceAlign;
+
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }
+ length = 0;
+ iterate = 0;
+ while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
+ sourceAlign = (sourceAddr & page_mask);
+ if (offset && !iterate) {
+ *offset = sourceAlign;
+ }
+ length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
+ iterate += segLen;
+ }
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }
+ }
+
+ return length;
+}
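+
+/*
+ * Worked example for the segment-walking path above (4K pages, hypothetical
+ * values): a single segment at sourceAddr 0x12345600 with segLen 0x300
+ * contributes round_page(0x12345900) - trunc_page(0x12345600) =
+ * 0x12346000 - 0x12345000 = 0x1000, and *offset is set to the sub-page
+ * alignment 0x600. The result is the page-granular span a DMA mapper would
+ * have to cover.
+ */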
+
+
+IOReturn
+IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
+ IOByteCount * dirtyPageCount )
+{
+ IOReturn err = kIOReturnNotReady;
+
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return kIOReturnNotAttached;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }
+ if (_memRef) {
+ err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
+ } else {
+ IOMultiMemoryDescriptor * mmd;
+ IOSubMemoryDescriptor * smd;
+ if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
+ err = smd->getPageCounts(residentPageCount, dirtyPageCount);
+ } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
+ err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
+ }
+ }
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }
+
+ return err;
+}
+
+
+#if defined(__arm__) || defined(__arm64__)
+extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
+extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
+#else /* defined(__arm__) || defined(__arm64__) */
+extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
+extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
+#endif /* defined(__arm__) || defined(__arm64__) */
+
+static void
+SetEncryptOp(addr64_t pa, unsigned int count)
+{
+ ppnum_t page, end;
+
+ page = (ppnum_t) atop_64(round_page_64(pa));
+ end = (ppnum_t) atop_64(trunc_page_64(pa + count));
+ for (; page < end; page++) {
+ pmap_clear_noencrypt(page);
+ }
+}
+
+static void
+ClearEncryptOp(addr64_t pa, unsigned int count)
+{
+ ppnum_t page, end;
+
+ page = (ppnum_t) atop_64(round_page_64(pa));
+ end = (ppnum_t) atop_64(trunc_page_64(pa + count));
+ for (; page < end; page++) {
+ pmap_set_noencrypt(page);
+ }
+}
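+
+/*
+ * Note on the rounding above: the start is rounded up and the end rounded
+ * down, so only pages wholly contained in [pa, pa + count) are touched. For
+ * example (4K pages, hypothetical values), pa = 0x1800 and count = 0x2000
+ * give page = atop_64(0x2000) = 2 and end = atop_64(0x3000) = 3, so only the
+ * single fully covered page 2 has its no-encrypt state changed.
+ */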
+
+IOReturn
+IOMemoryDescriptor::performOperation( IOOptionBits options,
+ IOByteCount offset, IOByteCount length )
+{
+ IOByteCount remaining;
+ unsigned int res;
+ void (*func)(addr64_t pa, unsigned int count) = NULL;
+#if defined(__arm__) || defined(__arm64__)
+ void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
+#endif
+
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return kIOReturnNotAttached;
+ }
+
+ switch (options) {
+ case kIOMemoryIncoherentIOFlush:
+#if defined(__arm__) || defined(__arm64__)
+ func_ext = &dcache_incoherent_io_flush64;
+#if __ARM_COHERENT_IO__
+ func_ext(0, 0, 0, &res);
+ return kIOReturnSuccess;
+#else /* __ARM_COHERENT_IO__ */
+ break;
+#endif /* __ARM_COHERENT_IO__ */
+#else /* defined(__arm__) || defined(__arm64__) */
+ func = &dcache_incoherent_io_flush64;
+ break;
+#endif /* defined(__arm__) || defined(__arm64__) */
+ case kIOMemoryIncoherentIOStore:
+#if defined(__arm__) || defined(__arm64__)
+ func_ext = &dcache_incoherent_io_store64;
+#if __ARM_COHERENT_IO__
+ func_ext(0, 0, 0, &res);
+ return kIOReturnSuccess;
+#else /* __ARM_COHERENT_IO__ */
+ break;
+#endif /* __ARM_COHERENT_IO__ */
+#else /* defined(__arm__) || defined(__arm64__) */
+ func = &dcache_incoherent_io_store64;
+ break;
+#endif /* defined(__arm__) || defined(__arm64__) */
+
+ case kIOMemorySetEncrypted:
+ func = &SetEncryptOp;
+ break;
+ case kIOMemoryClearEncrypted:
+ func = &ClearEncryptOp;
+ break;
+ }
+
+#if defined(__arm__) || defined(__arm64__)
+ if ((func == NULL) && (func_ext == NULL)) {
+ return kIOReturnUnsupported;
+ }
+#else /* defined(__arm__) || defined(__arm64__) */
+ if (!func) {
+ return kIOReturnUnsupported;
+ }
+#endif /* defined(__arm__) || defined(__arm64__) */
+
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }
+
+ res = 0x0UL;
+ remaining = length = min(length, getLength() - offset);
+ while (remaining) {
+ // (process another target segment?)
+ addr64_t dstAddr64;
+ IOByteCount dstLen;
+
+ dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
+ if (!dstAddr64) {
+ break;
+ }
+
+ // Clip segment length to remaining
+ if (dstLen > remaining) {
+ dstLen = remaining;
+ }
+ if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
+ dstLen = (UINT_MAX - PAGE_SIZE + 1);
+ }
+ if (remaining > UINT_MAX) {
+ remaining = UINT_MAX;
+ }
+
+#if defined(__arm__) || defined(__arm64__)
+ if (func) {
+ (*func)(dstAddr64, (unsigned int) dstLen);
+ }
+ if (func_ext) {
+ (*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
+ if (res != 0x0UL) {
+ remaining = 0;
+ break;
+ }
+ }
+#else /* defined(__arm__) || defined(__arm64__) */
+ (*func)(dstAddr64, (unsigned int) dstLen);
+#endif /* defined(__arm__) || defined(__arm64__) */
+
+ offset += dstLen;
+ remaining -= dstLen;
+ }
+
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }
+
+ return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
+}
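+
+/*
+ * Usage sketch (illustrative only): maintaining coherency around a
+ * non-coherent DMA transfer with the operation above. Only performOperation()
+ * and the kIOMemoryIncoherentIO* options are real; the surrounding driver
+ * flow is hypothetical.
+ *
+ *   md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());  // push CPU writes to memory
+ *   startDeviceDMA();                                                      // hypothetical
+ *   md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());  // make device writes visible
+ */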
+
+/*
+ *
+ */
+
+#if defined(__i386__) || defined(__x86_64__)
+
+extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
+
+/* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
+ * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
+ * kernel non-text data -- should we just add another range instead?
+ */
+#define io_kernel_static_start vm_kernel_stext
+#define io_kernel_static_end (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
+
+#elif defined(__arm__) || defined(__arm64__)
+
+extern vm_offset_t static_memory_end;
+
+#if defined(__arm64__)
+#define io_kernel_static_start vm_kext_base
+#else /* defined(__arm64__) */
+#define io_kernel_static_start vm_kernel_stext
+#endif /* defined(__arm64__) */
+
+#define io_kernel_static_end static_memory_end
+
+#else
+#error io_kernel_static_end is undefined for this architecture
+#endif
+
+static kern_return_t
+io_get_kernel_static_upl(
+ vm_map_t /* map */,
+ uintptr_t offset,
+ upl_size_t *upl_size,
+ unsigned int *page_offset,
+ upl_t *upl,
+ upl_page_info_array_t page_list,
+ unsigned int *count,
+ ppnum_t *highest_page)
+{
+ unsigned int pageCount, page;
+ ppnum_t phys;
+ ppnum_t highestPage = 0;
+
+ pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
+ if (pageCount > *count) {
+ pageCount = *count;
+ }
+ *upl_size = (upl_size_t) ptoa_64(pageCount);
+
+ *upl = NULL;
+ *page_offset = ((unsigned int) page_mask & offset);
+
+ for (page = 0; page < pageCount; page++) {
+ phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
+ if (!phys) {
+ break;
+ }
+ page_list[page].phys_addr = phys;
+ page_list[page].free_when_done = 0;
+ page_list[page].absent = 0;
+ page_list[page].dirty = 0;
+ page_list[page].precious = 0;
+ page_list[page].device = 0;
+ if (phys > highestPage) {
+ highestPage = phys;
+ }
+ }
+
+ *highest_page = highestPage;
+
+ return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
+}
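+
+/*
+ * Worked example (4K pages, hypothetical values): a request against a kernel
+ * static address with a page offset of 0xF80 and *upl_size of 0x2100 needs
+ * atop_32(round_page(0x2100 + 0xF80)) = atop_32(0x4000) = 4 pages, so
+ * *upl_size is rewritten to 0x4000 and *page_offset to 0xF80 before the
+ * per-page pmap_find_phys() walk fills in page_list[].
+ */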
+
+IOReturn
+IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
+{
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+ IOReturn error = kIOReturnSuccess;
+ ioGMDData *dataP;
+ upl_page_info_array_t pageInfo;
+ ppnum_t mapBase;
+ vm_tag_t tag = VM_KERN_MEMORY_NONE;
+ mach_vm_size_t numBytesWired = 0;
+
+ assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
+
+ if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
+ forDirection = (IODirection) (forDirection | getDirection());
+ }
+
+ dataP = getDataP(_memoryEntries);
+ upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
+ switch (kIODirectionOutIn & forDirection) {
+ case kIODirectionOut:
+ // Pages do not need to be marked as dirty on commit
+ uplFlags = UPL_COPYOUT_FROM;
+ dataP->fDMAAccess = kIODMAMapReadAccess;
+ break;
+
+ case kIODirectionIn:
+ dataP->fDMAAccess = kIODMAMapWriteAccess;
+ uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
+ break;
+
+ default:
+ dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
+ uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
+ break;
+ }
+
+ if (_wireCount) {
+ if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
+ OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
+ error = kIOReturnNotWritable;
+ }
+ } else {
+ IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
+ IOMapper *mapper;
+
+ mapper = dataP->fMapper;
+ dataP->fMappedBaseValid = dataP->fMappedBase = 0;
+
+ uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
+ tag = _kernelTag;
+ if (VM_KERN_MEMORY_NONE == tag) {
+ tag = IOMemoryTag(kernel_map);
+ }
+
+ if (kIODirectionPrepareToPhys32 & forDirection) {
+ if (!mapper) {
+ uplFlags |= UPL_NEED_32BIT_ADDR;
+ }
+ if (dataP->fDMAMapNumAddressBits > 32) {
+ dataP->fDMAMapNumAddressBits = 32;
+ }
+ }
+ if (kIODirectionPrepareNoFault & forDirection) {
+ uplFlags |= UPL_REQUEST_NO_FAULT;
+ }
+ if (kIODirectionPrepareNoZeroFill & forDirection) {
+ uplFlags |= UPL_NOZEROFILLIO;
+ }
+ if (kIODirectionPrepareNonCoherent & forDirection) {
+ uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
+ }
+
+ mapBase = 0;
+
+ // Note that appendBytes(NULL) zeros the data up to the desired length,
+ // and that its length parameter is an unsigned int
+ size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
+ if (uplPageSize > ((unsigned int)uplPageSize)) {
+ error = kIOReturnNoMemory;
+ traceInterval.setEndArg2(error);
+ return error;
+ }
+ if (!_memoryEntries->appendBytes(NULL, (unsigned int) uplPageSize)) {
+ error = kIOReturnNoMemory;
+ traceInterval.setEndArg2(error);
+ return error;
+ }
+ dataP = NULL;
+
+ // Find the appropriate vm_map for the given task
+ vm_map_t curMap;
+ if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
+ curMap = NULL;
+ } else {
+ curMap = get_task_map(_task);
+ }
+
+ // Iterate over the vector of virtual ranges
+ Ranges vec = _ranges;
+ unsigned int pageIndex = 0;
+ IOByteCount mdOffset = 0;
+ ppnum_t highestPage = 0;
+ bool byteAlignUPL;
+
+ IOMemoryEntry * memRefEntry = NULL;
+ if (_memRef) {
+ memRefEntry = &_memRef->entries[0];
+ byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
+ } else {
+ byteAlignUPL = true;
+ }
+
+ for (UInt range = 0; mdOffset < _length; range++) {
+ ioPLBlock iopl;
+ mach_vm_address_t startPage, startPageOffset;
+ mach_vm_size_t numBytes;
+ ppnum_t highPage = 0;
+
+ if (_memRef) {
+ if (range >= _memRef->count) {
+ panic("memRefEntry");
+ }
+ memRefEntry = &_memRef->entries[range];
+ numBytes = memRefEntry->size;
+ startPage = -1ULL;
+ if (byteAlignUPL) {
+ startPageOffset = 0;
+ } else {
+ startPageOffset = (memRefEntry->start & PAGE_MASK);
+ }
+ } else {
+ // Get the startPage address and length of vec[range]
+ getAddrLenForInd(startPage, numBytes, type, vec, range);
+ if (byteAlignUPL) {
+ startPageOffset = 0;
+ } else {
+ startPageOffset = startPage & PAGE_MASK;
+ startPage = trunc_page_64(startPage);
+ }
+ }
+ iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
+ numBytes += startPageOffset;
+
+ if (mapper) {
+ iopl.fMappedPage = mapBase + pageIndex;
+ } else {
+ iopl.fMappedPage = 0;
+ }
+
+ // Iterate over the current range, creating UPLs
+ while (numBytes) {
+ vm_address_t kernelStart = (vm_address_t) startPage;
+ vm_map_t theMap;
+ if (curMap) {
+ theMap = curMap;
+ } else if (_memRef) {
+ theMap = NULL;
+ } else {
+ assert(_task == kernel_task);
+ theMap = IOPageableMapForAddress(kernelStart);
+ }
+
+ // ioplFlags is an in/out parameter
+ upl_control_flags_t ioplFlags = uplFlags;
+ dataP = getDataP(_memoryEntries);
+ pageInfo = getPageList(dataP);
+ upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
+
+ mach_vm_size_t ioplPhysSize;
+ upl_size_t ioplSize;
+ unsigned int numPageInfo;
+
+ if (_memRef) {
+ error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
+ DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
+ } else {
+ error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
+ DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
+ }
+ if (error != KERN_SUCCESS) {
+ if (_memRef) {
+ DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
+ } else {
+ DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
+ }
+ printf("entry size error %d\n", error);
+ goto abortExit;
+ }
+ ioplPhysSize = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
+ numPageInfo = atop_32(ioplPhysSize);
+ if (byteAlignUPL) {
+ if (numBytes > ioplPhysSize) {
+ ioplSize = ((typeof(ioplSize))ioplPhysSize);
+ } else {
+ ioplSize = ((typeof(ioplSize))numBytes);
+ }
+ } else {
+ ioplSize = ((typeof(ioplSize))ioplPhysSize);
+ }
+
+ if (_memRef) {
+ memory_object_offset_t entryOffset;
+
+ entryOffset = mdOffset;
+ if (byteAlignUPL) {
+ entryOffset = (entryOffset - memRefEntry->offset);
+ } else {
+ entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
+ }
+ if (ioplSize > (memRefEntry->size - entryOffset)) {
+ ioplSize = ((typeof(ioplSize))(memRefEntry->size - entryOffset));
+ }
+ error = memory_object_iopl_request(memRefEntry->entry,
+ entryOffset,
+ &ioplSize,
+ &iopl.fIOPL,
+ baseInfo,
+ &numPageInfo,
+ &ioplFlags,
+ tag);
+ } else if ((theMap == kernel_map)
+ && (kernelStart >= io_kernel_static_start)
+ && (kernelStart < io_kernel_static_end)) {
+ error = io_get_kernel_static_upl(theMap,
+ kernelStart,
+ &ioplSize,
+ &iopl.fPageOffset,
+ &iopl.fIOPL,
+ baseInfo,
+ &numPageInfo,
+ &highPage);
+ } else {
+ assert(theMap);
+ error = vm_map_create_upl(theMap,
+ startPage,
+ (upl_size_t*)&ioplSize,
+ &iopl.fIOPL,
+ baseInfo,
+ &numPageInfo,
+ &ioplFlags,
+ tag);
+ }
+
+ if (error != KERN_SUCCESS) {
+ traceInterval.setEndArg2(error);
+ DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
+ goto abortExit;
+ }
+
+ assert(ioplSize);
+
+ if (iopl.fIOPL) {
+ highPage = upl_get_highest_page(iopl.fIOPL);
+ }
+ if (highPage > highestPage) {
+ highestPage = highPage;
+ }
+
+ if (baseInfo->device) {
+ numPageInfo = 1;
+ iopl.fFlags = kIOPLOnDevice;
+ } else {
+ iopl.fFlags = 0;
+ }
+
+ if (byteAlignUPL) {
+ if (iopl.fIOPL) {
+ DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
+ iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
+ }
+ if (startPage != (mach_vm_address_t)-1) {
+ // assert(iopl.fPageOffset == (startPage & PAGE_MASK));
+ startPage -= iopl.fPageOffset;
+ }
+ ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
+ numBytes += iopl.fPageOffset;
+ }
+
+ iopl.fIOMDOffset = mdOffset;
+ iopl.fPageInfo = pageIndex;
+
+ if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
+ // Clean up the partially created and unsaved iopl
+ if (iopl.fIOPL) {
+ upl_abort(iopl.fIOPL, 0);
+ upl_deallocate(iopl.fIOPL);
+ }
+ error = kIOReturnNoMemory;
+ traceInterval.setEndArg2(error);
+ goto abortExit;
+ }
+ dataP = NULL;
+
+ // Check for multiple iopls in one virtual range
+ pageIndex += numPageInfo;
+ mdOffset -= iopl.fPageOffset;
+ numBytesWired += ioplSize;
+ if (ioplSize < numBytes) {
+ numBytes -= ioplSize;
+ if (startPage != (mach_vm_address_t)-1) {
+ startPage += ioplSize;
+ }
+ mdOffset += ioplSize;
+ iopl.fPageOffset = 0;
+ if (mapper) {
+ iopl.fMappedPage = mapBase + pageIndex;
+ }
+ } else {
+ mdOffset += numBytes;
+ break;
+ }
+ }
+ }
+
+ _highestPage = highestPage;
+ DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);
+
+ if (UPL_COPYOUT_FROM & uplFlags) {
+ _flags |= kIOMemoryPreparedReadOnly;
+ }
+ traceInterval.setEndCodes(numBytesWired, error);
+ }
+
+#if IOTRACKING
+ if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
+ dataP = getDataP(_memoryEntries);
+ if (!dataP->fWireTracking.link.next) {
+ IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
+ }
+ }
+#endif /* IOTRACKING */
+
+ return error;
+
+abortExit:
+ {
+ dataP = getDataP(_memoryEntries);
+ UInt done = getNumIOPL(_memoryEntries, dataP);
+ ioPLBlock *ioplList = getIOPLList(dataP);
+
+ for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
+ if (ioplList[ioplIdx].fIOPL) {
+ upl_abort(ioplList[ioplIdx].fIOPL, 0);
+ upl_deallocate(ioplList[ioplIdx].fIOPL);
+ }
+ }
+ (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
+ }
+
+ if (error == KERN_FAILURE) {
+ error = kIOReturnCannotWire;
+ } else if (error == KERN_MEMORY_ERROR) {
+ error = kIOReturnNoResources;
+ }
+
+ return error;
+}
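+
+/*
+ * Usage sketch (illustrative only): wireVirtual() runs as part of prepare(),
+ * so the driver-side pattern that reaches the code above is the usual
+ * prepare()/complete() pairing around I/O. The address, length and task used
+ * to create the descriptor are hypothetical.
+ *
+ *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
+ *       userAddr, userLen, kIODirectionOutIn, clientTask);
+ *   if (md && (kIOReturnSuccess == md->prepare())) {   // wires the pages, builds the IOPLs
+ *       ... perform DMA ...
+ *       md->complete();                                // unwires
+ *   }
+ *   if (md) {
+ *       md->release();
+ *   }
+ */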
+
+bool
+IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
+{
+ ioGMDData * dataP;
+ unsigned dataSize;
+
+ if (size > UINT_MAX) {
+ return false;
+ }
+ dataSize = (unsigned int) size;
+ if (!_memoryEntries) {
+ _memoryEntries = OSData::withCapacity(dataSize);
+ if (!_memoryEntries) {
+ return false;
+ }
+ } else if (!_memoryEntries->initWithCapacity(dataSize)) {
+ return false;
+ }
+
+ _memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
+ dataP = getDataP(_memoryEntries);
+
+ if (mapper == kIOMapperWaitSystem) {
+ IOMapper::checkForSystemMapper();
+ mapper = IOMapper::gSystem;
+ }
+ dataP->fMapper = mapper;
+ dataP->fPageCnt = 0;
+ dataP->fMappedBase = 0;
+ dataP->fDMAMapNumAddressBits = 64;
+ dataP->fDMAMapAlignment = 0;
+ dataP->fPreparationID = kIOPreparationIDUnprepared;
+ dataP->fCompletionError = false;
+ dataP->fMappedBaseValid = false;
+
+ return true;
+}
+
+IOReturn
+IOMemoryDescriptor::dmaMap(
+ IOMapper * mapper,
+ IOMemoryDescriptor * memory,
+ IODMACommand * command,
+ const IODMAMapSpecification * mapSpec,
+ uint64_t offset,
+ uint64_t length,
+ uint64_t * mapAddress,
+ uint64_t * mapLength)
+{
+ IOReturn err;
+ uint32_t mapOptions;
+
+ mapOptions = 0;
+ mapOptions |= kIODMAMapReadAccess;
+ if (!(kIOMemoryPreparedReadOnly & _flags)) {
+ mapOptions |= kIODMAMapWriteAccess;
+ }
+
+ err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
+ mapSpec, command, NULL, mapAddress, mapLength);
+
+ if (kIOReturnSuccess == err) {
+ dmaMapRecord(mapper, command, *mapLength);
+ }
+
+ return err;
+}
+
+void
+IOMemoryDescriptor::dmaMapRecord(
+ IOMapper * mapper,
+ IODMACommand * command,
+ uint64_t mapLength)
+{
+ IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
+ kern_allocation_name_t alloc;
+ int16_t prior;
+
+ if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
+ kern_allocation_update_size(mapper->fAllocName, mapLength);
+ }
+
+ if (!command) {
+ return;
+ }
+ prior = OSAddAtomic16(1, &_dmaReferences);
+ if (!prior) {
+ if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
+ _mapName = alloc;
+ mapLength = _length;
+ kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
+ } else {
+ _mapName = NULL;
+ }
+ }
+}
+
+IOReturn
+IOMemoryDescriptor::dmaUnmap(
+ IOMapper * mapper,
+ IODMACommand * command,
+ uint64_t offset,
+ uint64_t mapAddress,
+ uint64_t mapLength)
+{
+ IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
+ IOReturn ret;
+ kern_allocation_name_t alloc;
+ kern_allocation_name_t mapName;
+ int16_t prior;
+
+ mapName = NULL;
+ prior = 0;
+ if (command) {
+ mapName = _mapName;
+ if (_dmaReferences) {
+ prior = OSAddAtomic16(-1, &_dmaReferences);
+ } else {
+ panic("_dmaReferences underflow");
+ }
+ }
+
+ if (!mapLength) {
+ traceInterval.setEndArg1(kIOReturnSuccess);
+ return kIOReturnSuccess;
+ }
+
+ ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
+
+ if ((alloc = mapper->fAllocName)) {
+ kern_allocation_update_size(alloc, -mapLength);
+ if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
+ mapLength = _length;
+ kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
+ }
+ }
+
+ traceInterval.setEndArg1(ret);
+ return ret;
+}
+
+IOReturn
+IOGeneralMemoryDescriptor::dmaMap(
+ IOMapper * mapper,
+ IOMemoryDescriptor * memory,
+ IODMACommand * command,
+ const IODMAMapSpecification * mapSpec,
+ uint64_t offset,
+ uint64_t length,
+ uint64_t * mapAddress,
+ uint64_t * mapLength)
+{
+ IOReturn err = kIOReturnSuccess;
+ ioGMDData * dataP;
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+
+ *mapAddress = 0;
+ if (kIOMemoryHostOnly & _flags) {
+ return kIOReturnSuccess;
+ }
+ if (kIOMemoryRemote & _flags) {
+ return kIOReturnNotAttached;
+ }
+
+ if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
+ || offset || (length != _length)) {
+ err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
+ } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
+ const ioPLBlock * ioplList = getIOPLList(dataP);
+ upl_page_info_t * pageList;
+ uint32_t mapOptions = 0;
+
+ IODMAMapSpecification mapSpec;
+ bzero(&mapSpec, sizeof(mapSpec));
+ mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
+ mapSpec.alignment = dataP->fDMAMapAlignment;
+
+ // For external UPLs the fPageInfo field points directly to
+ // the upl's upl_page_info_t array.
+ if (ioplList->fFlags & kIOPLExternUPL) {
+ pageList = (upl_page_info_t *) ioplList->fPageInfo;
+ mapOptions |= kIODMAMapPagingPath;
+ } else {
+ pageList = getPageList(dataP);
+ }
+
+ if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
+ mapOptions |= kIODMAMapPageListFullyOccupied;
+ }
+
+ assert(dataP->fDMAAccess);
+ mapOptions |= dataP->fDMAAccess;
+
+ // Check for direct device non-paged memory
+ if (ioplList->fFlags & kIOPLOnDevice) {
+ mapOptions |= kIODMAMapPhysicallyContiguous;
+ }
+
+ IODMAMapPageList dmaPageList =
+ {
+ .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
+ .pageListCount = _pages,
+ .pageList = &pageList[0]
+ };
+ err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
+ command, &dmaPageList, mapAddress, mapLength);
+
+ if (kIOReturnSuccess == err) {
+ dmaMapRecord(mapper, command, *mapLength);
+ }
+ }
+
+ return err;
+}
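+
+/*
+ * Usage sketch (illustrative only): the dmaMap()/dmaUnmap() paths above are
+ * normally reached through IODMACommand rather than called directly. A
+ * typical sequence looks roughly like the following; the specification
+ * parameters are abbreviated and should be treated as assumptions.
+ *
+ *   IODMACommand * cmd = IODMACommand::withSpecification(
+ *       kIODMACommandOutputHost64, 32, 0);   // 64-bit host segments, 32-bit addressing, no max segment size
+ *   cmd->setMemoryDescriptor(md);            // with autoPrepare, prepares and DMA-maps the descriptor
+ *   UInt64 offset = 0;
+ *   IODMACommand::Segment64 seg;
+ *   UInt32 numSeg = 1;
+ *   cmd->gen64IOVMSegments(&offset, &seg, &numSeg);
+ *   ...
+ *   cmd->clearMemoryDescriptor();            // completes and unmaps
+ *   cmd->release();
+ */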
+
+/*