- IOReturn err;
- int64_t offset = inoffset;
- uint32_t rangeIdx, entryIdx;
- vm_map_offset_t addr, mapAddr;
- vm_map_offset_t pageOffset, entryOffset, remain, chunk;
-
- mach_vm_address_t nextAddr;
- mach_vm_size_t nextLen;
- IOByteCount physLen;
- IOMemoryEntry * entry;
- vm_prot_t prot, memEntryCacheMode;
- IOOptionBits type;
- IOOptionBits cacheMode;
- vm_tag_t tag;
-
- /*
- * For the kIOMapPrefault option.
- */
- upl_page_info_t *pageList = NULL;
- UInt currentPageIndex = 0;
-
- type = _flags & kIOMemoryTypeMask;
- prot = VM_PROT_READ;
- if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
- prot &= ref->prot;
-
- cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
- if (kIODefaultCache != cacheMode)
- {
- // VM system requires write access to update named entry cache mode
- memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
- }
-
- tag = getVMTag(map);
-
- if (_task)
- {
- // Find first range for offset
- if (!_rangesCount) return (kIOReturnBadArgument);
- for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
- {
- getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
- if (remain < nextLen) break;
- remain -= nextLen;
- }
- }
- else
- {
- rangeIdx = 0;
- remain = 0;
- nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
- nextLen = size;
- }
-
- assert(remain < nextLen);
- if (remain >= nextLen) return (kIOReturnBadArgument);
-
- nextAddr += remain;
- nextLen -= remain;
- pageOffset = (page_mask & nextAddr);
- addr = 0;
- if (!(options & kIOMapAnywhere))
- {
- addr = *inaddr;
- if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
- addr -= pageOffset;
- }
-
- // find first entry for offset
- for (entryIdx = 0;
- (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
- entryIdx++) {}
- entryIdx--;
- entry = &ref->entries[entryIdx];
-
- // allocate VM
- size = round_page_64(size + pageOffset);
- if (kIOMapOverwrite & options)
- {
- if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
- {
- map = IOPageableMapForAddress(addr);
- }
- err = KERN_SUCCESS;
- }
- else
- {
- IOMemoryDescriptorMapAllocRef ref;
- ref.map = map;
- ref.tag = tag;
- ref.options = options;
- ref.size = size;
- ref.prot = prot;
- if (options & kIOMapAnywhere)
- // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
- ref.mapped = 0;
- else
- ref.mapped = addr;
- if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
- err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
- else
- err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
- if (KERN_SUCCESS == err)
- {
- addr = ref.mapped;
- map = ref.map;
- }
- }
-
- /*
- * Prefaulting is only possible if we wired the memory earlier. Check the
- * memory type, and the underlying data.
- */
- if (options & kIOMapPrefault)
- {
- /*
- * The memory must have been wired by calling ::prepare(), otherwise
- * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
- */
- assert(map != kernel_map);
- assert(_wireCount != 0);
- assert(_memoryEntries != NULL);
- if ((map == kernel_map) ||
- (_wireCount == 0) ||
- (_memoryEntries == NULL))
- {
- return kIOReturnBadArgument;
- }
-
- // Get the page list.
- ioGMDData* dataP = getDataP(_memoryEntries);
- ioPLBlock const* ioplList = getIOPLList(dataP);
- pageList = getPageList(dataP);
-
- // Get the number of IOPLs.
- UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
-
- /*
- * Scan through the IOPL Info Blocks, looking for the first block containing
- * the offset. The research will go past it, so we'll need to go back to the
- * right range at the end.
- */
- UInt ioplIndex = 0;
- while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
- ioplIndex++;
- ioplIndex--;
-
- // Retrieve the IOPL info block.
- ioPLBlock ioplInfo = ioplList[ioplIndex];
-
- /*
- * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
- * array.
- */
- if (ioplInfo.fFlags & kIOPLExternUPL)
- pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
- else
- pageList = &pageList[ioplInfo.fPageInfo];
-
- // Rebase [offset] into the IOPL in order to looks for the first page index.
- mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
-
- // Retrieve the index of the first page corresponding to the offset.
- currentPageIndex = atop_32(offsetInIOPL);
- }
-
- // enter mappings
- remain = size;
- mapAddr = addr;
- addr += pageOffset;
-
- while (remain && (KERN_SUCCESS == err))
- {
- entryOffset = offset - entry->offset;
- if ((page_mask & entryOffset) != pageOffset)
- {
- err = kIOReturnNotAligned;
- break;
- }
-
- if (kIODefaultCache != cacheMode)
- {
- vm_size_t unused = 0;
- err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
- memEntryCacheMode, NULL, entry->entry);
- assert (KERN_SUCCESS == err);
- }
-
- entryOffset -= pageOffset;
- if (entryOffset >= entry->size) panic("entryOffset");
- chunk = entry->size - entryOffset;
- if (chunk)
- {
- if (chunk > remain) chunk = remain;
- if (options & kIOMapPrefault)
- {
- UInt nb_pages = round_page(chunk) / PAGE_SIZE;
- err = vm_map_enter_mem_object_prefault(map,
- &mapAddr,
- chunk, 0 /* mask */,
- (VM_FLAGS_FIXED
- | VM_FLAGS_OVERWRITE
- | VM_MAKE_TAG(tag)
- | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
- entry->entry,
- entryOffset,
- prot, // cur
- prot, // max
- &pageList[currentPageIndex],
- nb_pages);
-
- // Compute the next index in the page list.
- currentPageIndex += nb_pages;
- assert(currentPageIndex <= _pages);
- }
- else
- {
- err = vm_map_enter_mem_object(map,
- &mapAddr,
- chunk, 0 /* mask */,
- (VM_FLAGS_FIXED
- | VM_FLAGS_OVERWRITE
- | VM_MAKE_TAG(tag)
- | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
- entry->entry,
- entryOffset,
- false, // copy
- prot, // cur
- prot, // max
- VM_INHERIT_NONE);
- }
- if (KERN_SUCCESS != err) break;
- remain -= chunk;
- if (!remain) break;
- mapAddr += chunk;
- offset += chunk - pageOffset;
- }
- pageOffset = 0;
- entry++;
- entryIdx++;
- if (entryIdx >= ref->count)
- {
- err = kIOReturnOverrun;
- break;
- }
- }
-
- if ((KERN_SUCCESS != err) && addr && !(kIOMapOverwrite & options))
- {
- (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
- addr = 0;
- }
- *inaddr = addr;
-
- return (err);
+ IOReturn err;
+ int64_t offset = inoffset;
+ uint32_t rangeIdx, entryIdx;
+ vm_map_offset_t addr, mapAddr;
+ vm_map_offset_t pageOffset, entryOffset, remain, chunk;
+
+ mach_vm_address_t nextAddr;
+ mach_vm_size_t nextLen;
+ IOByteCount physLen;
+ IOMemoryEntry * entry;
+ vm_prot_t prot, memEntryCacheMode;
+ IOOptionBits type;
+ IOOptionBits cacheMode;
+ vm_tag_t tag;
+ // For the kIOMapPrefault option.
+ upl_page_info_t * pageList = NULL;
+ UInt currentPageIndex = 0;
+ bool didAlloc;
+
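+ // If this reference wraps another memory reference, map through that
+ // underlying reference instead and return its result directly.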
+ if (ref->mapRef) {
+ err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
+ return err;
+ }
+
+ type = _flags & kIOMemoryTypeMask;
+
+ prot = VM_PROT_READ;
+ if (!(kIOMapReadOnly & options)) {
+ prot |= VM_PROT_WRITE;
+ }
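+ // Never grant more access than the memory reference itself allows.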
+ prot &= ref->prot;
+
+ cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
+ if (kIODefaultCache != cacheMode) {
+ // VM system requires write access to update named entry cache mode
+ memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
+ }
+
+ tag = getVMTag(map);
+
+ if (_task) {
+ // Find first range for offset
+ if (!_rangesCount) {
+ return kIOReturnBadArgument;
+ }
+ for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
+ getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
+ if (remain < nextLen) {
+ break;
+ }
+ remain -= nextLen;
+ }
+ } else {
+ rangeIdx = 0;
+ remain = 0;
+ nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
+ nextLen = size;
+ }
+
+ assert(remain < nextLen);
+ if (remain >= nextLen) {
+ return kIOReturnBadArgument;
+ }
+
+ nextAddr += remain;
+ nextLen -= remain;
+ pageOffset = (page_mask & nextAddr);
+ addr = 0;
+ didAlloc = false;
+
+ if (!(options & kIOMapAnywhere)) {
+ addr = *inaddr;
+ if (pageOffset != (page_mask & addr)) {
+ return kIOReturnNotAligned;
+ }
+ addr -= pageOffset;
+ }
+
+ // find first entry for offset
+ for (entryIdx = 0;
+ (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
+ entryIdx++) {
+ }
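+ // The loop stops one entry past the match; step back to the entry whose
+ // offset covers the requested offset.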
+ entryIdx--;
+ entry = &ref->entries[entryIdx];
+
+ // allocate VM
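+ // Round the mapping up to whole pages, including the leading sub-page offset.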
+ size = round_page_64(size + pageOffset);
+ if (kIOMapOverwrite & options) {
+ if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
+ map = IOPageableMapForAddress(addr);
+ }
+ err = KERN_SUCCESS;
+ } else {
+ IOMemoryDescriptorMapAllocRef ref;
+ ref.map = map;
+ ref.tag = tag;
+ ref.options = options;
+ ref.size = size;
+ ref.prot = prot;
+ if (options & kIOMapAnywhere) {
+ // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
+ ref.mapped = 0;
+ } else {
+ ref.mapped = addr;
+ }
+ if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
+ err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
+ } else {
+ err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
+ }
+ if (KERN_SUCCESS == err) {
+ addr = ref.mapped;
+ map = ref.map;
+ didAlloc = true;
+ }
+ }
+
+ /*
+ * If the memory is associated with a device pager but doesn't have a UPL,
+ * it will be immediately faulted in through the pager via populateDevicePager().
+ * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
+ * operations.
+ */
+ if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
+ options &= ~kIOMapPrefault;
+ }
+
+ /*
+ * Prefaulting is only possible if we wired the memory earlier. Check the
+ * memory type and the underlying data.
+ */
+ if (options & kIOMapPrefault) {
+ /*
+ * The memory must have been wired by calling ::prepare(); otherwise
+ * we don't have the UPL, and without UPLs pages cannot be pre-faulted.
+ */
+ assert(_wireCount != 0);
+ assert(_memoryEntries != NULL);
+ if ((_wireCount == 0) ||
+ (_memoryEntries == NULL)) {
+ return kIOReturnBadArgument;
+ }
+
+ // Get the page list.
+ ioGMDData* dataP = getDataP(_memoryEntries);
+ ioPLBlock const* ioplList = getIOPLList(dataP);
+ pageList = getPageList(dataP);
+
+ // Get the number of IOPLs.
+ UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
+
+ /*
+ * Scan through the IOPL Info Blocks, looking for the first block containing
+ * the offset. The search will go past it, so we'll need to go back to the
+ * right block at the end.
+ */
+ UInt ioplIndex = 0;
+ while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset) {
+ ioplIndex++;
+ }
+ ioplIndex--;
+
+ // Retrieve the IOPL info block.
+ ioPLBlock ioplInfo = ioplList[ioplIndex];
+
+ /*
+ * For external UPLs, fPageInfo points directly to the UPL's
+ * upl_page_info_t array.
+ */
+ if (ioplInfo.fFlags & kIOPLExternUPL) {
+ pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
+ } else {
+ pageList = &pageList[ioplInfo.fPageInfo];
+ }
+
+ // Rebase [offset] into the IOPL in order to look up the first page index.
+ mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
+
+ // Retrieve the index of the first page corresponding to the offset.
+ currentPageIndex = atop_32(offsetInIOPL);
+ }
+
+ // enter mappings
+ remain = size;
+ mapAddr = addr;
+ addr += pageOffset;
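+ // mapAddr walks page-aligned chunk boundaries for the vm_map_enter calls;
+ // addr, returned to the caller, keeps the sub-page offset of the first range.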
+
+ while (remain && (KERN_SUCCESS == err)) {
+ entryOffset = offset - entry->offset;
+ if ((page_mask & entryOffset) != pageOffset) {
+ err = kIOReturnNotAligned;
+ break;
+ }
+
+ if (kIODefaultCache != cacheMode) {
+ vm_size_t unused = 0;
+ err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
+ memEntryCacheMode, NULL, entry->entry);
+ assert(KERN_SUCCESS == err);
+ }
+
+ entryOffset -= pageOffset;
+ if (entryOffset >= entry->size) {
+ panic("entryOffset");
+ }
+ chunk = entry->size - entryOffset;
+ if (chunk) {
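+ // The VM tag and IOKit accounting bit, which the previous code packed into
+ // the VM_FLAGS word, are now passed via vmk_flags and the tag argument.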
+ vm_map_kernel_flags_t vmk_flags;
+
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
+
+ if (chunk > remain) {
+ chunk = remain;
+ }
+ if (options & kIOMapPrefault) {
+ UInt nb_pages = round_page(chunk) / PAGE_SIZE;
+
+ err = vm_map_enter_mem_object_prefault(map,
+ &mapAddr,
+ chunk, 0 /* mask */,
+ (VM_FLAGS_FIXED
+ | VM_FLAGS_OVERWRITE),
+ vmk_flags,
+ tag,
+ entry->entry,
+ entryOffset,
+ prot, // cur
+ prot, // max
+ &pageList[currentPageIndex],
+ nb_pages);
+
+ // Compute the next index in the page list.
+ currentPageIndex += nb_pages;
+ assert(currentPageIndex <= _pages);
+ } else {
+ err = vm_map_enter_mem_object(map,
+ &mapAddr,
+ chunk, 0 /* mask */,
+ (VM_FLAGS_FIXED
+ | VM_FLAGS_OVERWRITE),
+ vmk_flags,
+ tag,
+ entry->entry,
+ entryOffset,
+ false, // copy
+ prot, // cur
+ prot, // max
+ VM_INHERIT_NONE);
+ }
+ if (KERN_SUCCESS != err) {
+ break;
+ }
+ remain -= chunk;
+ if (!remain) {
+ break;
+ }
+ mapAddr += chunk;
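+ // The first chunk also covers pageOffset bytes before the requested offset,
+ // so advance offset by chunk minus that; pageOffset is zeroed below.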
+ offset += chunk - pageOffset;
+ }
+ pageOffset = 0;
+ entry++;
+ entryIdx++;
+ if (entryIdx >= ref->count) {
+ err = kIOReturnOverrun;
+ break;
+ }
+ }
+
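+ // On failure, deallocate only a range this call allocated; caller-supplied
+ // or overwritten mappings are left untouched.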
+ if ((KERN_SUCCESS != err) && didAlloc) {
+ (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
+ addr = 0;
+ }
+ *inaddr = addr;
+
+ return err;