+ else
+ {
+ // _task == 0, physical or kIOMemoryTypeUPL
+ memory_object_t pager;
+ vm_size_t size = ptoa_32(_pages);
+
+ if (!getKernelReserved()) panic("getKernelReserved");
+
+ reserved->dp.pagerContig = (1 == _rangesCount);
+ reserved->dp.memory = this;
+
+ pagerFlags = pagerFlagsForCacheMode(cacheMode);
+ if (-1U == pagerFlags) panic("phys is kIODefaultCache");
+ if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
+
+ pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
+ size, pagerFlags);
+ assert (pager);
+ if (!pager) err = kIOReturnVMError;
+ else
+ {
+ srcAddr = nextAddr;
+ entryAddr = trunc_page_64(srcAddr);
+ err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
+ size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
+ assert (KERN_SUCCESS == err);
+ if (KERN_SUCCESS != err) device_pager_deallocate(pager);
+ else
+ {
+ reserved->dp.devicePager = pager;
+ entries->entry = entry;
+ entries->size = size;
+ entries->offset = offset + (entryAddr - srcAddr);
+ entries++;
+ count++;
+ }
+ }
+ }
+
+ ref->count = count;
+ ref->prot = prot;
+
+ if (_task && (KERN_SUCCESS == err)
+ && (kIOMemoryMapCopyOnWrite & _flags)
+ && !(kIOMemoryReferenceCOW & options))
+ {
+ err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
+ }
+
+ if (KERN_SUCCESS == err)
+ {
+ if (MAP_MEM_NAMED_REUSE & prot)
+ {
+ memoryReferenceFree(ref);
+ OSIncrementAtomic(&_memRef->refCount);
+ ref = _memRef;
+ }
+ }
+ else
+ {
+ memoryReferenceFree(ref);
+ ref = NULL;
+ }
+
+ *reference = ref;
+
+ return (err);
+}
+
+// Allocation callback: reserves a VM range in 'map' according to the
+// IOMemoryDescriptorMapAllocRef passed as _ref. Used both directly and as
+// the per-map callback for IOIteratePageableMaps (see memoryReferenceMap).
+// On success the chosen address and the map are written back into the ref.
+kern_return_t
+IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
+{
+ IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
+ IOReturn err;
+ vm_map_offset_t addr;
+
+ // Requested address; 0 when the caller set kIOMapAnywhere (the flags
+ // below then carry VM_FLAGS_ANYWHERE and the VM picks the address).
+ addr = ref->mapped;
+
+ // Reserve the range only: IPC_PORT_NULL with copy == false enters no
+ // backing object yet; protections and the VM tag come from the ref.
+ err = vm_map_enter_mem_object(map, &addr, ref->size,
+ (vm_map_offset_t) 0,
+ (((ref->options & kIOMapAnywhere)
+ ? VM_FLAGS_ANYWHERE
+ : VM_FLAGS_FIXED)
+ | VM_MAKE_TAG(ref->tag)),
+ IPC_PORT_NULL,
+ (memory_object_offset_t) 0,
+ false, /* copy */
+ ref->prot,
+ ref->prot,
+ VM_INHERIT_NONE);
+ if (KERN_SUCCESS == err)
+ {
+ // Report back where the reservation landed and in which map.
+ ref->mapped = (mach_vm_address_t) addr;
+ ref->map = map;
+ }
+
+ return( err );
+}
+
+// Enter the memory described by 'ref' into 'map'. inoffset/size select a
+// sub-range of the descriptor; options carry kIOMap* flags (placement,
+// cache mode, read-only, prefault, overwrite). On success *inaddr holds
+// the mapped address including the sub-page offset of the first byte.
+// Walks ref->entries, entering each overlapping entry chunk by chunk.
+IOReturn
+IOGeneralMemoryDescriptor::memoryReferenceMap(
+ IOMemoryReference * ref,
+ vm_map_t map,
+ mach_vm_size_t inoffset,
+ mach_vm_size_t size,
+ IOOptionBits options,
+ mach_vm_address_t * inaddr)
+{
+ IOReturn err;
+ int64_t offset = inoffset;
+ uint32_t rangeIdx, entryIdx;
+ vm_map_offset_t addr, mapAddr;
+ vm_map_offset_t pageOffset, entryOffset, remain, chunk;
+
+ mach_vm_address_t nextAddr;
+ mach_vm_size_t nextLen;
+ IOByteCount physLen;
+ IOMemoryEntry * entry;
+ vm_prot_t prot, memEntryCacheMode;
+ IOOptionBits type;
+ IOOptionBits cacheMode;
+ vm_tag_t tag;
+ // for the kIOMapPrefault option.
+ upl_page_info_t * pageList = NULL;
+ UInt currentPageIndex = 0;
+ bool didAlloc;
+
+ // COW clone case: a secondary reference was attached by
+ // memoryReferenceCreate; map through it instead.
+ if (ref->mapRef)
+ {
+ err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
+ return (err);
+ }
+
+ type = _flags & kIOMemoryTypeMask;
+
+ // Requested protection, clipped to what the reference allows.
+ prot = VM_PROT_READ;
+ if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
+ prot &= ref->prot;
+
+ cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
+ if (kIODefaultCache != cacheMode)
+ {
+ // VM system requires write access to update named entry cache mode
+ memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
+ }
+
+ tag = getVMTag(map);
+
+ if (_task)
+ {
+ // Virtual descriptor: find the range containing 'offset'; 'remain'
+ // ends up as the offset within that range.
+ // Find first range for offset
+ if (!_rangesCount) return (kIOReturnBadArgument);
+ for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
+ {
+ getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
+ if (remain < nextLen) break;
+ remain -= nextLen;
+ }
+ }
+ else
+ {
+ // Physical/UPL descriptor: single segment starting at 'offset'.
+ rangeIdx = 0;
+ remain = 0;
+ nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
+ nextLen = size;
+ }
+
+ // 'offset' past the end of all ranges is a caller error.
+ assert(remain < nextLen);
+ if (remain >= nextLen) return (kIOReturnBadArgument);
+
+ nextAddr += remain;
+ nextLen -= remain;
+ // Sub-page offset of the first byte; the map address must share it.
+ pageOffset = (page_mask & nextAddr);
+ addr = 0;
+ didAlloc = false;
+
+ if (!(options & kIOMapAnywhere))
+ {
+ addr = *inaddr;
+ if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
+ addr -= pageOffset;
+ }
+
+ // find first entry for offset
+ for (entryIdx = 0;
+ (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
+ entryIdx++) {}
+ // Loop went one past the containing entry; step back.
+ entryIdx--;
+ entry = &ref->entries[entryIdx];
+
+ // allocate VM
+ size = round_page_64(size + pageOffset);
+ if (kIOMapOverwrite & options)
+ {
+ // Caller already owns the target range; no reservation needed.
+ if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
+ {
+ map = IOPageableMapForAddress(addr);
+ }
+ err = KERN_SUCCESS;
+ }
+ else
+ {
+ // Reserve the destination range via IOMemoryDescriptorMapAlloc
+ // (iterating pageable maps for pageable buffer memory).
+ IOMemoryDescriptorMapAllocRef ref;
+ ref.map = map;
+ ref.tag = tag;
+ ref.options = options;
+ ref.size = size;
+ ref.prot = prot;
+ if (options & kIOMapAnywhere)
+ // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
+ ref.mapped = 0;
+ else
+ ref.mapped = addr;
+ if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
+ err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
+ else
+ err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
+ if (KERN_SUCCESS == err)
+ {
+ addr = ref.mapped;
+ map = ref.map;
+ didAlloc = true;
+ }
+ }
+
+ /*
+ * Prefaulting is only possible if we wired the memory earlier. Check the
+ * memory type, and the underlying data.
+ */
+ if (options & kIOMapPrefault)
+ {
+ /*
+ * The memory must have been wired by calling ::prepare(), otherwise
+ * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
+ */
+ assert(map != kernel_map);
+ assert(_wireCount != 0);
+ assert(_memoryEntries != NULL);
+ if ((map == kernel_map) ||
+ (_wireCount == 0) ||
+ (_memoryEntries == NULL))
+ {
+ return kIOReturnBadArgument;
+ }
+
+ // Get the page list.
+ ioGMDData* dataP = getDataP(_memoryEntries);
+ ioPLBlock const* ioplList = getIOPLList(dataP);
+ pageList = getPageList(dataP);
+
+ // Get the number of IOPLs.
+ UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
+
+ /*
+ * Scan through the IOPL Info Blocks, looking for the first block containing
+ * the offset. The research will go past it, so we'll need to go back to the
+ * right range at the end.
+ */
+ UInt ioplIndex = 0;
+ while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
+ ioplIndex++;
+ ioplIndex--;
+
+ // Retrieve the IOPL info block.
+ ioPLBlock ioplInfo = ioplList[ioplIndex];
+
+ /*
+ * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
+ * array.
+ */
+ if (ioplInfo.fFlags & kIOPLExternUPL)
+ pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
+ else
+ pageList = &pageList[ioplInfo.fPageInfo];
+
+ // Rebase [offset] into the IOPL in order to looks for the first page index.
+ mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
+
+ // Retrieve the index of the first page corresponding to the offset.
+ currentPageIndex = atop_32(offsetInIOPL);
+ }
+
+ // enter mappings
+ remain = size;
+ mapAddr = addr;
+ // 'addr' becomes the caller-visible address (includes sub-page offset);
+ // 'mapAddr' stays page-aligned as the mapping cursor.
+ addr += pageOffset;
+
+ while (remain && (KERN_SUCCESS == err))
+ {
+ // Each entry must begin at the same sub-page offset as the cursor.
+ entryOffset = offset - entry->offset;
+ if ((page_mask & entryOffset) != pageOffset)
+ {
+ err = kIOReturnNotAligned;
+ break;
+ }
+
+ if (kIODefaultCache != cacheMode)
+ {
+ // MAP_MEM_ONLY update of the named entry's cache mode; the other
+ // arguments are unused in that mode.
+ vm_size_t unused = 0;
+ err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
+ memEntryCacheMode, NULL, entry->entry);
+ assert (KERN_SUCCESS == err);
+ }
+
+ entryOffset -= pageOffset;
+ if (entryOffset >= entry->size) panic("entryOffset");
+ // Map the smaller of: what's left of this entry, what's left overall.
+ chunk = entry->size - entryOffset;
+ if (chunk)
+ {
+ if (chunk > remain) chunk = remain;
+ if (options & kIOMapPrefault)
+ {
+ UInt nb_pages = round_page(chunk) / PAGE_SIZE;
+ err = vm_map_enter_mem_object_prefault(map,
+ &mapAddr,
+ chunk, 0 /* mask */,
+ (VM_FLAGS_FIXED
+ | VM_FLAGS_OVERWRITE
+ | VM_MAKE_TAG(tag)
+ | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
+ entry->entry,
+ entryOffset,
+ prot, // cur
+ prot, // max
+ &pageList[currentPageIndex],
+ nb_pages);
+
+ // Compute the next index in the page list.
+ currentPageIndex += nb_pages;
+ assert(currentPageIndex <= _pages);
+ }
+ else
+ {
+ err = vm_map_enter_mem_object(map,
+ &mapAddr,
+ chunk, 0 /* mask */,
+ (VM_FLAGS_FIXED
+ | VM_FLAGS_OVERWRITE
+ | VM_MAKE_TAG(tag)
+ | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
+ entry->entry,
+ entryOffset,
+ false, // copy
+ prot, // cur
+ prot, // max
+ VM_INHERIT_NONE);
+ }
+ if (KERN_SUCCESS != err) break;
+ remain -= chunk;
+ if (!remain) break;
+ mapAddr += chunk;
+ // Advance the source offset; pageOffset only applied to the first
+ // chunk (it was folded into 'size'/'remain' by round_page_64 above).
+ offset += chunk - pageOffset;
+ }
+ pageOffset = 0;
+ entry++;
+ entryIdx++;
+ if (entryIdx >= ref->count)
+ {
+ err = kIOReturnOverrun;
+ break;
+ }
+ }
+
+ // On failure, release any range we reserved ourselves.
+ if ((KERN_SUCCESS != err) && didAlloc)
+ {
+ (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
+ addr = 0;
+ }
+ *inaddr = addr;
+
+ return (err);
+}
+
+// Sum resident and dirty page counts over every memory entry of 'ref'.
+// Entries are queried from last to first (matching the historical walk);
+// the first failing kernel query aborts the loop and its error is
+// returned, with the partial totals still written out below.
+IOReturn
+IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
+ IOMemoryReference * ref,
+ IOByteCount * residentPageCount,
+ IOByteCount * dirtyPageCount)
+{
+ IOReturn kr;
+ unsigned int entryResident, entryDirty;
+ unsigned int sumResident, sumDirty;
+ unsigned int idx;
+
+ sumResident = 0;
+ sumDirty = 0;
+ kr = kIOReturnSuccess;
+ for (idx = ref->count; idx > 0; )
+ {
+ idx--;
+ kr = mach_memory_entry_get_page_counts(ref->entries[idx].entry,
+ &entryResident, &entryDirty);
+ if (KERN_SUCCESS != kr) break;
+ sumResident += entryResident;
+ sumDirty += entryDirty;
+ }
+
+ if (residentPageCount) *residentPageCount = sumResident;
+ if (dirtyPageCount) *dirtyPageCount = sumDirty;
+ return (kr);
+}
+
+// Apply 'newState' purgeable control to every entry of 'ref' (walked from
+// last to first) and fold the per-entry results into one aggregate state
+// reported via *oldState: Empty dominates, then Volatile, then
+// NonVolatile. Stops at the first kernel/helper failure.
+IOReturn
+IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
+ IOMemoryReference * ref,
+ IOOptionBits newState,
+ IOOptionBits * oldState)
+{
+ IOReturn kr;
+ vm_purgable_t ctrl;
+ int aggregate, entryState;
+ unsigned int idx;
+
+ aggregate = kIOMemoryPurgeableNonVolatile;
+ kr = kIOReturnSuccess;
+ for (idx = ref->count; idx > 0; )
+ {
+ idx--;
+
+ kr = purgeableControlBits(newState, &ctrl, &entryState);
+ if (KERN_SUCCESS != kr) break;
+ kr = mach_memory_entry_purgable_control(ref->entries[idx].entry, ctrl, &entryState);
+ if (KERN_SUCCESS != kr) break;
+ kr = purgeableStateBits(&entryState);
+ if (KERN_SUCCESS != kr) break;
+
+ if (kIOMemoryPurgeableEmpty == entryState)
+ {
+ // Any empty entry makes the whole reference report empty.
+ aggregate = kIOMemoryPurgeableEmpty;
+ }
+ else if ((kIOMemoryPurgeableEmpty != aggregate)
+ && (kIOMemoryPurgeableVolatile != aggregate))
+ {
+ // Aggregate not yet pinned at a stronger state: a volatile entry
+ // lowers it to volatile, otherwise it remains non-volatile.
+ aggregate = (kIOMemoryPurgeableVolatile == entryState)
+ ? kIOMemoryPurgeableVolatile
+ : kIOMemoryPurgeableNonVolatile;
+ }
+ }
+
+ if (oldState) *oldState = aggregate;
+ return (kr);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+// Create a descriptor for a kernel-virtual buffer. Forwards to the
+// address-range constructor, forcing kIOMemoryAutoPrepare so the memory
+// is wired at creation time.
+IOMemoryDescriptor *
+IOMemoryDescriptor::withAddress(void * address,
+ IOByteCount length,
+ IODirection direction)
+{
+ IOOptionBits options = direction | kIOMemoryAutoPrepare;
+ return (IOMemoryDescriptor::withAddressRange(
+ (IOVirtualAddress) address, length, options, kernel_task));
+}
+
+#ifndef __LP64__
+// Legacy (pre-LP64) constructor taking an explicit task. Allocates an
+// IOGeneralMemoryDescriptor and initializes it with the given virtual
+// range; returns 0 when allocation or initialization fails.
+IOMemoryDescriptor *
+IOMemoryDescriptor::withAddress(IOVirtualAddress address,
+ IOByteCount length,
+ IODirection direction,
+ task_t task)
+{
+ IOGeneralMemoryDescriptor * md = new IOGeneralMemoryDescriptor;
+ if (!md) return 0;
+ if (md->initWithAddress(address, length, direction, task)) return md;
+ md->release();
+ return 0;
+}
+#endif /* !__LP64__ */
+
+// Create a descriptor for a physical address range (no owning task).
+IOMemoryDescriptor *
+IOMemoryDescriptor::withPhysicalAddress(
+ IOPhysicalAddress address,
+ IOByteCount length,
+ IODirection direction )
+{
+ IOMemoryDescriptor * md;
+ md = IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
+ return (md);
+}
+
+#ifndef __LP64__
+IOMemoryDescriptor *
+IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
+ UInt32 withCount,
+ IODirection direction,
+ task_t task,
+ bool asReference)
+{
+ IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
+ if (that)
+ {
+ if (that->initWithRanges(ranges, withCount, direction, task, asReference))
+ return that;
+
+ that->release();
+ }
+ return 0;