- assert((regionSize & PAGE_MASK) == 0);
-
- regionSize = min(regionSize, checkSize);
- checkSize -= regionSize;
- checkBase += regionSize;
- } // (for each vm region)
- } // (for each io range)
-
- for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {
-
- vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
- IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
- _ranges.v[rangeIndex].length +
- page_size - 1);
-
- vm_map_t taskVMMap = getMapForTask(_task, srcAlign);
-
- // If this I/O is for a user land task then protect ourselves
- // against COW and other vm_shenanigans
- if (_task && _task != kernel_task) {
- // setup a data object to hold the 'named' memory regions
- // @@@ gvdl: If we fail to allocate an OSData we will just
- // hope for the best for the time being. Lets not fail a
- // prepare at this late stage in product release.
- if (!_memoryEntries)
- _memoryEntries = OSData::withCapacity(16);
- if (_memoryEntries) {
- vm_object_offset_t desiredSize = srcAlignEnd - srcAlign;
- vm_object_offset_t entryStart = srcAlign;
- ipc_port_t memHandle;
-
- do {
- vm_object_offset_t actualSize = desiredSize;
-
- rc = mach_make_memory_entry_64
- (taskVMMap, &actualSize, entryStart,
- forDirection, &memHandle, NULL);
- if (KERN_SUCCESS != rc) {
- IOLog("IOMemoryDescriptor::prepare mach_make_memory_entry_64 failed: %d\n", rc);
- goto abortExit;
- }
-
- _memoryEntries->
- appendBytes(&memHandle, sizeof(memHandle));
- desiredSize -= actualSize;
- entryStart += actualSize;
- } while (desiredSize);
- }
+ return (err);
+}
+
+extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
+extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
+
+IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
+ IOByteCount offset, IOByteCount length )
+{
+ IOByteCount remaining;
+ void (*func)(addr64_t pa, unsigned int count) = 0;
+
+ switch (options)
+ {
+ case kIOMemoryIncoherentIOFlush:
+ func = &dcache_incoherent_io_flush64;
+ break;
+ case kIOMemoryIncoherentIOStore:
+ func = &dcache_incoherent_io_store64;
+ break;
+ }
+
+ if (!func)
+ return (kIOReturnUnsupported);
+
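+ // Clamp the requested length to what the descriptor covers, then walk
+ // the range one physical segment at a time.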
+ remaining = length = min(length, getLength() - offset);
+ while (remaining)
+ // (process another target segment?)
+ {
+ addr64_t dstAddr64;
+ IOByteCount dstLen;
+
+ dstAddr64 = getPhysicalSegment64(offset, &dstLen);
+ if (!dstAddr64)
+ break;
+
+ // Clip segment length to remaining
+ if (dstLen > remaining)
+ dstLen = remaining;
+
+ (*func)(dstAddr64, dstLen);
+
+ offset += dstLen;
+ remaining -= dstLen;
+ }
+
+ return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
+}
+
+#ifdef __ppc__
+extern vm_offset_t static_memory_end;
+#define io_kernel_static_end static_memory_end
+#else
+extern vm_offset_t first_avail;
+#define io_kernel_static_end first_avail
+#endif
+
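+ // Kernel virtual addresses below io_kernel_static_end fall in the memory
+ // the kernel maps statically at boot; their physical pages can be looked
+ // up directly in the kernel pmap instead of going through a real UPL.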
+static kern_return_t
+io_get_kernel_static_upl(
+ vm_map_t /* map */,
+ vm_address_t offset,
+ vm_size_t *upl_size,
+ upl_t *upl,
+ upl_page_info_array_t page_list,
+ unsigned int *count)
+{
+ unsigned int pageCount, page;
+ ppnum_t phys;
+
+ pageCount = atop_32(*upl_size);
+ if (pageCount > *count)
+ pageCount = *count;
+
+ *upl = NULL;
+
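+ // Build the page list by translating each kernel virtual page to its
+ // physical page number; no UPL object is created for static memory.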
+ for (page = 0; page < pageCount; page++)
+ {
+ phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
+ if (!phys)
+ break;
+ page_list[page].phys_addr = phys;
+ page_list[page].pageout = 0;
+ page_list[page].absent = 0;
+ page_list[page].dirty = 0;
+ page_list[page].precious = 0;
+ page_list[page].device = 0;
+ }
+
+ return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
+}
+
+IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
+{
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+ IOReturn error = kIOReturnNoMemory;
+ ioGMDData *dataP;
+ ppnum_t mapBase = 0;
+ IOMapper *mapper;
+ ipc_port_t sharedMem = (ipc_port_t) _memEntry;
+
+ assert(!_wireCount);
+ assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type);
+
+ if (_pages >= gIOMaximumMappedIOPageCount)
+ return kIOReturnNoResources;
+
+ dataP = getDataP(_memoryEntries);
+ mapper = dataP->fMapper;
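+ // When a mapper (system IOMMU/DART) is present, reserve I/O address space
+ // for all of this descriptor's pages up front; each range's fMappedBase
+ // is handed out from this allocation below.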
+ if (mapper && _pages)
+ mapBase = mapper->iovmAlloc(_pages);
+
+ // Note that appendBytes(NULL) zeros the data up to the
+ // desired length.
+ _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
+ dataP = 0; // May no longer be valid, so let's not get tempted.
+
+ if (forDirection == kIODirectionNone)
+ forDirection = _direction;
+
+ int uplFlags; // This memory descriptor's default flags for UPL creation
+ switch (forDirection)
+ {
+ case kIODirectionOut:
+ // Pages do not need to be marked as dirty on commit
+ uplFlags = UPL_COPYOUT_FROM;
+ _flags |= kIOMemoryPreparedReadOnly;
+ break;
+
+ case kIODirectionIn:
+ default:
+ uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
+ break;
+ }
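+ // All UPLs built below wire their pages for I/O and use the lite form.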
+ uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
+
+ // Find the appropriate vm_map for the given task
+ vm_map_t curMap;
+ if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
+ curMap = 0;
+ else
+ curMap = get_task_map(_task);
+
+ // Iterate over the vector of virtual ranges
+ Ranges vec = _ranges;
+ unsigned int pageIndex = 0;
+ IOByteCount mdOffset = 0;
+ for (UInt range = 0; range < _rangesCount; range++) {
+ ioPLBlock iopl;
+ user_addr_t startPage;
+ IOByteCount numBytes;
+
+ // Get the startPage address and length of vec[range]
+ getAddrLenForInd(startPage, numBytes, type, vec, range);
+ iopl.fPageOffset = (short) startPage & PAGE_MASK;
+ numBytes += iopl.fPageOffset;
+ startPage = trunc_page_64(startPage);
+
+ if (mapper)
+ iopl.fMappedBase = mapBase + pageIndex;
+ else
+ iopl.fMappedBase = 0;
+
+ // Iterate over the current range, creating UPLs
+ while (numBytes) {
+ dataP = getDataP(_memoryEntries);
+ vm_address_t kernelStart = (vm_address_t) startPage;
+ vm_map_t theMap;
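+ // Pick the map to build this UPL against: the task's map, a pageable
+ // kernel map chosen by address, or none when the named memory entry
+ // (sharedMem) is used directly.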
+ if (curMap)
+ theMap = curMap;
+ else if (!sharedMem) {
+ assert(_task == kernel_task);
+ theMap = IOPageableMapForAddress(kernelStart);
+ }
+ else
+ theMap = NULL;
+
+ upl_page_info_array_t pageInfo = getPageList(dataP);
+ int ioplFlags = uplFlags;
+ upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
+
+ vm_size_t ioplSize = round_page_32(numBytes);
+ unsigned int numPageInfo = atop_32(ioplSize);
+
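+ // Statically mapped kernel memory gets a synthetic page list straight
+ // from the pmap; memory backed by a named entry is wired through
+ // memory_object_iopl_request.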
+ if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
+ error = io_get_kernel_static_upl(theMap,
+ kernelStart,
+ &ioplSize,
+ &iopl.fIOPL,
+ baseInfo,
+ &numPageInfo);
+ }
+ else if (sharedMem) {
+ error = memory_object_iopl_request(sharedMem,
+ ptoa_32(pageIndex),
+ &ioplSize,
+ &iopl.fIOPL,
+ baseInfo,
+ &numPageInfo,
+ &ioplFlags);