+ if (KERN_SUCCESS == err) {
+ addr = ref.mapped;
+ map = ref.map;
+ didAlloc = true;
+ }
+#if LOGUNALIGN
+ IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
+#endif
+ }
+
+ /*
+ * If the memory is associated with a device pager but doesn't have a UPL,
+ * it will be immediately faulted in through the pager via populateDevicePager().
+ * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
+ * operations.
+ */
+ if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
+ options &= ~kIOMapPrefault;
+ }
+
+ /*
+ * Prefaulting is only possible if we wired the memory earlier. Check the
+ * memory type, and the underlying data.
+ */
+ if (options & kIOMapPrefault) {
+ /*
+ * The memory must have been wired by calling ::prepare(), otherwise
+ * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
+ */
+ assert(_wireCount != 0);
+ assert(_memoryEntries != NULL);
+ if ((_wireCount == 0) ||
+ (_memoryEntries == NULL)) {
+ return kIOReturnBadArgument;
+ }
+
+ // Get the page list.
+ ioGMDData* dataP = getDataP(_memoryEntries);
+ ioPLBlock const* ioplList = getIOPLList(dataP);
+ pageList = getPageList(dataP);
+
+ // Get the number of IOPLs.
+ UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
+
+ /*
+ * Scan through the IOPL Info Blocks, looking for the first block containing
+ * the offset. The search will run past it, so we'll need to step back to the
+ * correct range at the end.
+ */
+ UInt ioplIndex = 0;
+ while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
+ ioplIndex++;
+ }
+ ioplIndex--;
+
+ // Retrieve the IOPL info block.
+ ioPLBlock ioplInfo = ioplList[ioplIndex];
+
+ /*
+ * For external UPLs, the fPageInfo field points directly to the UPL's page_info_t
+ * array.
+ */
+ if (ioplInfo.fFlags & kIOPLExternUPL) {
+ pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
+ } else {
+ pageList = &pageList[ioplInfo.fPageInfo];
+ }
+
+ // Rebase [offset] into the IOPL in order to look up the first page index.
+ mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
+
+ // Retrieve the index of the first page corresponding to the offset.
+ currentPageIndex = atop_32(offsetInIOPL);
+ }
+
+ // Enter the mappings, one memory entry at a time.
+ remain = size;
+ mapAddr = addr;
+ entryIdx = firstEntryIdx;
+ entry = &ref->entries[entryIdx];
+
+ while (remain && (KERN_SUCCESS == err)) {
+#if LOGUNALIGN
+ printf("offset %qx, %qx\n", offset, entry->offset);
+#endif
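+ /*
+ * If a non-default cache mode was requested, re-issue
+ * mach_make_memory_entry() on the existing entry to apply the cache
+ * attributes before entering the mapping.
+ */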
+ if (kIODefaultCache != cacheMode) {
+ vm_size_t unused = 0;
+ err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
+ memEntryCacheMode, NULL, entry->entry);
+ assert(KERN_SUCCESS == err);
+ }
+ entryOffset = offset - entry->offset;
+ if (entryOffset >= entry->size) {
+ panic("entryOffset");
+ }
+ chunk = entry->size - entryOffset;
+#if LOGUNALIGN
+ printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
+#endif
+ if (chunk) {
+ vm_map_kernel_flags_t vmk_flags;
+
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
+
+ if (chunk > remain) {
+ chunk = remain;
+ }
+ mapAddrOut = mapAddr;
+ if (options & kIOMapPrefault) {
+ // Cast after dividing so a chunk larger than 4GB is not truncated.
+ UInt nb_pages = (typeof(nb_pages))(round_page(chunk) / PAGE_SIZE);
+
+ err = vm_map_enter_mem_object_prefault(map,
+ &mapAddrOut,
+ chunk, 0 /* mask */,
+ (VM_FLAGS_FIXED
+ | VM_FLAGS_OVERWRITE
+ | VM_FLAGS_RETURN_DATA_ADDR),
+ vmk_flags,
+ tag,
+ entry->entry,
+ entryOffset,
+ prot, // cur
+ prot, // max
+ &pageList[currentPageIndex],
+ nb_pages);
+
+ // Compute the next index in the page list.
+ currentPageIndex += nb_pages;
+ assert(currentPageIndex <= _pages);
+ } else {
+#if LOGUNALIGN
+ printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
+#endif
+ err = vm_map_enter_mem_object(map,
+ &mapAddrOut,
+ chunk, 0 /* mask */,
+ (VM_FLAGS_FIXED
+ | VM_FLAGS_OVERWRITE
+ | VM_FLAGS_RETURN_DATA_ADDR),
+ vmk_flags,
+ tag,
+ entry->entry,
+ entryOffset,
+ false, // copy
+ prot, // cur
+ prot, // max
+ VM_INHERIT_NONE);
+ }
+ if (KERN_SUCCESS != err) {
+ panic("map enter err %x", err);
+ break;
+ }
+#if LOGUNALIGN
+ printf("mapAddr o %qx\n", mapAddrOut);
+#endif
+ if (entryIdx == firstEntryIdx) {
+ addr = mapAddrOut;
+ }
+ remain -= chunk;
+ if (!remain) {
+ break;
+ }
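+ /*
+ * Advance the target address by this entry's size as seen by the target
+ * map (whose page size may differ from the descriptor's), but advance the
+ * source offset by the bytes actually mapped.
+ */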
+ mach_vm_size_t entrySize;
+ err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
+ assert(KERN_SUCCESS == err);
+ mapAddr += entrySize;
+ offset += chunk;
+ }
+
+ entry++;
+ entryIdx++;
+ if (entryIdx >= ref->count) {
+ err = kIOReturnOverrun;
+ break;
+ }
+ }
+
+ if (KERN_SUCCESS != err) {
+ DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
+ }
+
+ if ((KERN_SUCCESS != err) && didAlloc) {
+ (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
+ addr = 0;
+ }
+ *inaddr = addr;
+
+ return err;
+}
+
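+ /*
+ * Return the total page-rounded length needed to DMA map all entries of the
+ * reference; optionally return the physical page offset of the first entry
+ * in *offset.
+ */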
+uint64_t
+IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
+ IOMemoryReference * ref,
+ uint64_t * offset)
+{
+ kern_return_t kr;
+ vm_object_offset_t data_offset = 0;
+ uint64_t total;
+ uint32_t idx;
+
+ assert(ref->count);
+ if (offset) {
+ *offset = (uint64_t) data_offset;
+ }
+ total = 0;
+ for (idx = 0; idx < ref->count; idx++) {
+ kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
+ &data_offset);
+ if (KERN_SUCCESS != kr) {
+ DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
+ } else if (0 != data_offset) {
+ DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[idx].entry, data_offset, kr);
+ }
+ if (offset && !idx) {
+ *offset = (uint64_t) data_offset;
+ }
+ total += round_page(data_offset + ref->entries[idx].size);
+ }
+
+ DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
+ (offset ? *offset : (vm_object_offset_t)-1), total);
+
+ return total;
+}
+
+
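+ /*
+ * Sum the resident and dirty page counts across all memory entries of the
+ * reference.
+ */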
+IOReturn
+IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
+ IOMemoryReference * ref,
+ IOByteCount * residentPageCount,
+ IOByteCount * dirtyPageCount)
+{
+ IOReturn err;
+ IOMemoryEntry * entries;
+ unsigned int resident, dirty;
+ unsigned int totalResident, totalDirty;
+
+ totalResident = totalDirty = 0;
+ err = kIOReturnSuccess;
+ entries = ref->entries + ref->count;
+ while (entries > &ref->entries[0]) {
+ entries--;
+ err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
+ if (KERN_SUCCESS != err) {
+ break;
+ }
+ totalResident += resident;
+ totalDirty += dirty;
+ }
+
+ if (residentPageCount) {
+ *residentPageCount = totalResident;
+ }
+ if (dirtyPageCount) {
+ *dirtyPageCount = totalDirty;
+ }
+ return err;
+}
+
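+ /*
+ * Apply the requested purgeable state to every memory entry of the
+ * reference and report a single aggregate prior state via *oldState.
+ */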
+IOReturn
+IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
+ IOMemoryReference * ref,
+ IOOptionBits newState,
+ IOOptionBits * oldState)
+{
+ IOReturn err;
+ IOMemoryEntry * entries;
+ vm_purgable_t control;
+ int totalState, state;
+
+ totalState = kIOMemoryPurgeableNonVolatile;
+ err = kIOReturnSuccess;
+ entries = ref->entries + ref->count;
+ while (entries > &ref->entries[0]) {
+ entries--;
+
+ err = purgeableControlBits(newState, &control, &state);
+ if (KERN_SUCCESS != err) {
+ break;
+ }
+ err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
+ if (KERN_SUCCESS != err) {
+ break;
+ }
+ err = purgeableStateBits(&state);
+ if (KERN_SUCCESS != err) {
+ break;
+ }
+
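+ /*
+ * Fold this entry's state into the aggregate: Empty dominates, then
+ * Volatile, then NonVolatile.
+ */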
+ if (kIOMemoryPurgeableEmpty == state) {
+ totalState = kIOMemoryPurgeableEmpty;
+ } else if (kIOMemoryPurgeableEmpty == totalState) {
+ continue;
+ } else if (kIOMemoryPurgeableVolatile == totalState) {
+ continue;
+ } else if (kIOMemoryPurgeableVolatile == state) {
+ totalState = kIOMemoryPurgeableVolatile;
+ } else {
+ totalState = kIOMemoryPurgeableNonVolatile;
+ }
+ }
+
+ if (oldState) {
+ *oldState = totalState;
+ }
+ return err;
+}
+
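+ /*
+ * Assign the new owning task and ledger tag to every memory entry of the
+ * reference; if any entry fails, the last error is returned.
+ */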
+IOReturn
+IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
+ IOMemoryReference * ref,
+ task_t newOwner,
+ int newLedgerTag,
+ IOOptionBits newLedgerOptions)
+{
+ IOReturn err, totalErr;
+ IOMemoryEntry * entries;
+
+ totalErr = kIOReturnSuccess;
+ entries = ref->entries + ref->count;
+ while (entries > &ref->entries[0]) {
+ entries--;
+
+ err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
+ if (KERN_SUCCESS != err) {
+ totalErr = err;
+ }
+ }
+
+ return totalErr;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
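+ /*
+ * Create a descriptor for a kernel-virtual address range; kIOMemoryAutoPrepare
+ * is added so the memory is prepared for the lifetime of the descriptor.
+ */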
+OSSharedPtr<IOMemoryDescriptor>
+IOMemoryDescriptor::withAddress(void * address,
+ IOByteCount length,
+ IODirection direction)
+{
+ return IOMemoryDescriptor::
+ withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
+}