+static IOReturn
+purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
+{
+ IOReturn err = kIOReturnSuccess;
+
+ *control = VM_PURGABLE_SET_STATE;
+
+ enum { kIOMemoryPurgeableControlMask = 15 };
+
+ switch (kIOMemoryPurgeableControlMask & newState)
+ {
+ case kIOMemoryPurgeableKeepCurrent:
+ *control = VM_PURGABLE_GET_STATE;
+ break;
+
+ case kIOMemoryPurgeableNonVolatile:
+ *state = VM_PURGABLE_NONVOLATILE;
+ break;
+ case kIOMemoryPurgeableVolatile:
+ *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
+ break;
+ case kIOMemoryPurgeableEmpty:
+ *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
+ break;
+ default:
+ err = kIOReturnBadArgument;
+ break;
+ }
+ return (err);
+}
+
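+// Translate a VM_PURGABLE_* state returned by the VM layer back into the
+// kIOMemoryPurgeable* constant reported to IOKit clients.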
+static IOReturn
+purgeableStateBits(int * state)
+{
+ IOReturn err = kIOReturnSuccess;
+
+ switch (VM_PURGABLE_STATE_MASK & *state)
+ {
+ case VM_PURGABLE_NONVOLATILE:
+ *state = kIOMemoryPurgeableNonVolatile;
+ break;
+ case VM_PURGABLE_VOLATILE:
+ *state = kIOMemoryPurgeableVolatile;
+ break;
+ case VM_PURGABLE_EMPTY:
+ *state = kIOMemoryPurgeableEmpty;
+ break;
+ default:
+ *state = kIOMemoryPurgeableNonVolatile;
+ err = kIOReturnNotReady;
+ break;
+ }
+ return (err);
+}
+
+
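+// Encode an IOKit cache mode into memory-entry protection bits via SET_MAP_MEM,
+// so the MAP_MEM_* attribute travels with the vm_prot_t handed to
+// mach_make_memory_entry_64().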
+static vm_prot_t
+vmProtForCacheMode(IOOptionBits cacheMode)
+{
+ vm_prot_t prot = 0;
+ switch (cacheMode)
+ {
+ case kIOInhibitCache:
+ SET_MAP_MEM(MAP_MEM_IO, prot);
+ break;
+
+ case kIOWriteThruCache:
+ SET_MAP_MEM(MAP_MEM_WTHRU, prot);
+ break;
+
+ case kIOWriteCombineCache:
+ SET_MAP_MEM(MAP_MEM_WCOMB, prot);
+ break;
+
+ case kIOCopybackCache:
+ SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
+ break;
+
+ case kIOCopybackInnerCache:
+ SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
+ break;
+
+ case kIODefaultCache:
+ default:
+ SET_MAP_MEM(MAP_MEM_NOOP, prot);
+ break;
+ }
+
+ return (prot);
+}
+
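+// Map an IOKit cache mode onto DEVICE_PAGER_* flags for device_pager_setup().
+// kIODefaultCache yields -1U; callers must resolve the cache mode first.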
+static unsigned int
+pagerFlagsForCacheMode(IOOptionBits cacheMode)
+{
+ unsigned int pagerFlags = 0;
+ switch (cacheMode)
+ {
+ case kIOInhibitCache:
+ pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
+ break;
+
+ case kIOWriteThruCache:
+ pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
+ break;
+
+ case kIOWriteCombineCache:
+ pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
+ break;
+
+ case kIOCopybackCache:
+ pagerFlags = DEVICE_PAGER_COHERENT;
+ break;
+
+ case kIOCopybackInnerCache:
+ pagerFlags = DEVICE_PAGER_COHERENT;
+ break;
+
+ case kIODefaultCache:
+ default:
+ pagerFlags = -1U;
+ break;
+ }
+ return (pagerFlags);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
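+// An IOMemoryReference is a refcounted array of named memory entries (one send
+// right per mach_make_memory_entry_64() result) covering a descriptor's ranges,
+// plus an optional copy-on-write companion reference in mapRef.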
+struct IOMemoryEntry
+{
+ ipc_port_t entry;
+ int64_t offset;
+ uint64_t size;
+};
+
+struct IOMemoryReference
+{
+ volatile SInt32 refCount;
+ vm_prot_t prot;
+ uint32_t capacity;
+ uint32_t count;
+ struct IOMemoryReference * mapRef;
+ IOMemoryEntry entries[0];
+};
+
+enum
+{
+ kIOMemoryReferenceReuse = 0x00000001,
+ kIOMemoryReferenceWrite = 0x00000002,
+ kIOMemoryReferenceCOW = 0x00000004,
+};
+
+SInt32 gIOMemoryReferenceCount;
+
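+// Allocate an IOMemoryReference with room for 'capacity' entries, or grow an
+// existing reference ('realloc') by copying it into the larger allocation.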
+IOMemoryReference *
+IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
+{
+ IOMemoryReference * ref;
+ size_t newSize, oldSize, copySize;
+
+ newSize = (sizeof(IOMemoryReference)
+ - sizeof(ref->entries)
+ + capacity * sizeof(ref->entries[0]));
+ ref = (typeof(ref)) IOMalloc(newSize);
+ if (realloc)
+ {
+ oldSize = (sizeof(IOMemoryReference)
+ - sizeof(realloc->entries)
+ + realloc->capacity * sizeof(realloc->entries[0]));
+ copySize = oldSize;
+ if (copySize > newSize) copySize = newSize;
+ if (ref) bcopy(realloc, ref, copySize);
+ IOFree(realloc, oldSize);
+ }
+ else if (ref)
+ {
+ bzero(ref, sizeof(*ref));
+ ref->refCount = 1;
+ OSIncrementAtomic(&gIOMemoryReferenceCount);
+ }
+ if (!ref) return (0);
+ ref->capacity = capacity;
+ return (ref);
+}
+
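+// Release the send right held for every entry, free any chained mapRef, then
+// free the reference itself.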
+void
+IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
+{
+ IOMemoryEntry * entries;
+ size_t size;
+
+ if (ref->mapRef)
+ {
+ memoryReferenceFree(ref->mapRef);
+ ref->mapRef = 0;
+ }
+
+ entries = ref->entries + ref->count;
+ while (entries > &ref->entries[0])
+ {
+ entries--;
+ ipc_port_release_send(entries->entry);
+ }
+ size = (sizeof(IOMemoryReference)
+ - sizeof(ref->entries)
+ + ref->capacity * sizeof(ref->entries[0]));
+ IOFree(ref, size);
+
+ OSDecrementAtomic(&gIOMemoryReferenceCount);
+}
+
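+// Drop one reference; the final release frees the structure.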
+void
+IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
+{
+ if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
+}
+
+
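+// Build the named memory entries that describe this descriptor. Virtual ranges
+// are coalesced and turned into entries with mach_make_memory_entry_64();
+// physical (task-less) descriptors get a single device pager backed entry.
+// With kIOMemoryReferenceReuse, an existing _memRef is reused if the newly
+// created entries match it exactly.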
+IOReturn
+IOGeneralMemoryDescriptor::memoryReferenceCreate(
+ IOOptionBits options,
+ IOMemoryReference ** reference)
+{
+ enum { kCapacity = 4, kCapacityInc = 4 };
+
+ kern_return_t err;
+ IOMemoryReference * ref;
+ IOMemoryEntry * entries;
+ IOMemoryEntry * cloneEntries;
+ vm_map_t map;
+ ipc_port_t entry, cloneEntry;
+ vm_prot_t prot;
+ memory_object_size_t actualSize;
+ uint32_t rangeIdx;
+ uint32_t count;
+ mach_vm_address_t entryAddr, endAddr, entrySize;
+ mach_vm_size_t srcAddr, srcLen;
+ mach_vm_size_t nextAddr, nextLen;
+ mach_vm_size_t offset, remain;
+ IOByteCount physLen;
+ IOOptionBits type = (_flags & kIOMemoryTypeMask);
+ IOOptionBits cacheMode;
+ unsigned int pagerFlags;
+ vm_tag_t tag;
+
+ ref = memoryReferenceAlloc(kCapacity, NULL);
+ if (!ref) return (kIOReturnNoMemory);
+
+ tag = getVMTag(kernel_map);
+ entries = &ref->entries[0];
+ count = 0;
+ err = KERN_SUCCESS;
+
+ offset = 0;
+ rangeIdx = 0;
+ if (_task)
+ {
+ getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
+ }
+ else
+ {
+ nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
+ nextLen = physLen;
+
+ // default cache mode for physical
+ if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
+ {
+ IOOptionBits mode;
+ pagerFlags = IODefaultCacheBits(nextAddr);
+ if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
+ {
+ if (DEVICE_PAGER_GUARDED & pagerFlags)
+ mode = kIOInhibitCache;
+ else
+ mode = kIOWriteCombineCache;
+ }
+ else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
+ mode = kIOWriteThruCache;
+ else
+ mode = kIOCopybackCache;
+ _flags |= (mode << kIOMemoryBufferCacheShift);
+ }
+ }
+
+ // cache mode & vm_prot
+ prot = VM_PROT_READ;
+ cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
+ prot |= vmProtForCacheMode(cacheMode);
+ // VM system requires write access to change cache mode
+ if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE;
+ if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
+ if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE;
+ if (kIOMemoryReferenceCOW & options) prot |= MAP_MEM_VM_COPY;
+
+ if ((kIOMemoryReferenceReuse & options) && _memRef)
+ {
+ cloneEntries = &_memRef->entries[0];
+ prot |= MAP_MEM_NAMED_REUSE;
+ }
+
+ if (_task)
+ {
+ // virtual ranges
+
+ if (kIOMemoryBufferPageable & _flags)
+ {
+ // IOBufferMemoryDescriptor alloc - set flags for entry + object create
+ prot |= MAP_MEM_NAMED_CREATE;
+ if (kIOMemoryBufferPurgeable & _flags) prot |= MAP_MEM_PURGABLE;
+ if (kIOMemoryUseReserve & _flags) prot |= MAP_MEM_GRAB_SECLUDED;
+
+ prot |= VM_PROT_WRITE;
+ map = NULL;
+ }
+ else map = get_task_map(_task);
+
+ remain = _length;
+ while (remain)
+ {
+ srcAddr = nextAddr;
+ srcLen = nextLen;
+ nextAddr = 0;
+ nextLen = 0;
+ // coalesce addr range
+ for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
+ {
+ getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
+ if ((srcAddr + srcLen) != nextAddr) break;
+ srcLen += nextLen;
+ }
+ entryAddr = trunc_page_64(srcAddr);
+ endAddr = round_page_64(srcAddr + srcLen);
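+ // the VM may return a smaller entry than requested (actualSize), so
+ // loop until the whole coalesced range is covered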
+ do
+ {
+ entrySize = (endAddr - entryAddr);
+ if (!entrySize) break;
+ actualSize = entrySize;
+
+ cloneEntry = MACH_PORT_NULL;
+ if (MAP_MEM_NAMED_REUSE & prot)
+ {
+ if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
+ else prot &= ~MAP_MEM_NAMED_REUSE;
+ }
+
+ err = mach_make_memory_entry_64(map,
+ &actualSize, entryAddr, prot, &entry, cloneEntry);
+
+ if (KERN_SUCCESS != err) break;
+ if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");
+
+ if (count >= ref->capacity)
+ {
+ ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
+ entries = &ref->entries[count];
+ }
+ entries->entry = entry;
+ entries->size = actualSize;
+ entries->offset = offset + (entryAddr - srcAddr);
+ entryAddr += actualSize;
+ if (MAP_MEM_NAMED_REUSE & prot)
+ {
+ if ((cloneEntries->entry == entries->entry)
+ && (cloneEntries->size == entries->size)
+ && (cloneEntries->offset == entries->offset)) cloneEntries++;
+ else prot &= ~MAP_MEM_NAMED_REUSE;
+ }
+ entries++;
+ count++;
+ }
+ while (true);
+ offset += srcLen;
+ remain -= srcLen;
+ }
+ }
+ else
+ {
+ // _task == 0, physical or kIOMemoryTypeUPL
+ memory_object_t pager;
+ vm_size_t size = ptoa_32(_pages);
+
+ if (!getKernelReserved()) panic("getKernelReserved");
+
+ reserved->dp.pagerContig = (1 == _rangesCount);
+ reserved->dp.memory = this;
+
+ pagerFlags = pagerFlagsForCacheMode(cacheMode);
+ if (-1U == pagerFlags) panic("phys is kIODefaultCache");
+ if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
+
+ pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
+ size, pagerFlags);
+ assert (pager);
+ if (!pager) err = kIOReturnVMError;
+ else
+ {
+ srcAddr = nextAddr;
+ entryAddr = trunc_page_64(srcAddr);
+ err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
+ size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
+ assert (KERN_SUCCESS == err);
+ if (KERN_SUCCESS != err) device_pager_deallocate(pager);
+ else
+ {
+ reserved->dp.devicePager = pager;
+ entries->entry = entry;
+ entries->size = size;
+ entries->offset = offset + (entryAddr - srcAddr);
+ entries++;
+ count++;
+ }
+ }
+ }
+
+ ref->count = count;
+ ref->prot = prot;
+
+ if (_task && (KERN_SUCCESS == err)
+ && (kIOMemoryMapCopyOnWrite & _flags)
+ && !(kIOMemoryReferenceCOW & options))
+ {
+ err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
+ }
+
+ if (KERN_SUCCESS == err)
+ {
+ if (MAP_MEM_NAMED_REUSE & prot)
+ {
+ memoryReferenceFree(ref);
+ OSIncrementAtomic(&_memRef->refCount);
+ ref = _memRef;
+ }
+ }
+ else
+ {
+ memoryReferenceFree(ref);
+ ref = NULL;
+ }
+
+ *reference = ref;
+
+ return (err);
+}
+
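+// Helper for memoryReferenceMap(): reserve ref->size bytes of VM in 'map'
+// (anywhere, or fixed at ref->mapped). The named entries are mapped over this
+// reservation later with VM_FLAGS_OVERWRITE.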
+kern_return_t
+IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
+{
+ IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
+ IOReturn err;
+ vm_map_offset_t addr;
+
+ addr = ref->mapped;
+
+ err = vm_map_enter_mem_object(map, &addr, ref->size,
+ (vm_map_offset_t) 0,
+ (((ref->options & kIOMapAnywhere)
+ ? VM_FLAGS_ANYWHERE
+ : VM_FLAGS_FIXED)
+ | VM_MAKE_TAG(ref->tag)),
+ IPC_PORT_NULL,
+ (memory_object_offset_t) 0,
+ false, /* copy */
+ ref->prot,
+ ref->prot,
+ VM_INHERIT_NONE);
+ if (KERN_SUCCESS == err)
+ {
+ ref->mapped = (mach_vm_address_t) addr;
+ ref->map = map;
+ }
+
+ return( err );
+}
+
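+// Map (a sub-range of) the reference into 'map'. The target address range is
+// reserved first, then each named entry is entered over it chunk by chunk;
+// with kIOMapPrefault the wired page list is used to fault pages in up front.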
+IOReturn
+IOGeneralMemoryDescriptor::memoryReferenceMap(
+ IOMemoryReference * ref,
+ vm_map_t map,
+ mach_vm_size_t inoffset,
+ mach_vm_size_t size,
+ IOOptionBits options,
+ mach_vm_address_t * inaddr)
+{
+ IOReturn err;
+ int64_t offset = inoffset;
+ uint32_t rangeIdx, entryIdx;
+ vm_map_offset_t addr, mapAddr;
+ vm_map_offset_t pageOffset, entryOffset, remain, chunk;
+
+ mach_vm_address_t nextAddr;
+ mach_vm_size_t nextLen;
+ IOByteCount physLen;
+ IOMemoryEntry * entry;
+ vm_prot_t prot, memEntryCacheMode;
+ IOOptionBits type;
+ IOOptionBits cacheMode;
+ vm_tag_t tag;
+ // for the kIOMapPrefault option.
+ upl_page_info_t * pageList = NULL;
+ UInt currentPageIndex = 0;
+ bool didAlloc;
+
+ if (ref->mapRef)
+ {
+ err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
+ return (err);
+ }
+
+ type = _flags & kIOMemoryTypeMask;
+
+ prot = VM_PROT_READ;
+ if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
+ prot &= ref->prot;
+
+ cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
+ if (kIODefaultCache != cacheMode)
+ {
+ // VM system requires write access to update named entry cache mode
+ memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
+ }
+
+ tag = getVMTag(map);
+
+ if (_task)
+ {
+ // Find first range for offset
+ if (!_rangesCount) return (kIOReturnBadArgument);
+ for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
+ {
+ getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
+ if (remain < nextLen) break;
+ remain -= nextLen;
+ }
+ }
+ else
+ {
+ rangeIdx = 0;
+ remain = 0;
+ nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
+ nextLen = size;
+ }
+
+ assert(remain < nextLen);
+ if (remain >= nextLen) return (kIOReturnBadArgument);
+
+ nextAddr += remain;
+ nextLen -= remain;
+ pageOffset = (page_mask & nextAddr);
+ addr = 0;
+ didAlloc = false;
+
+ if (!(options & kIOMapAnywhere))
+ {
+ addr = *inaddr;
+ if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
+ addr -= pageOffset;
+ }
+
+ // find first entry for offset
+ for (entryIdx = 0;
+ (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
+ entryIdx++) {}
+ entryIdx--;
+ entry = &ref->entries[entryIdx];
+
+ // allocate VM
+ size = round_page_64(size + pageOffset);
+ if (kIOMapOverwrite & options)
+ {
+ if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
+ {
+ map = IOPageableMapForAddress(addr);
+ }
+ err = KERN_SUCCESS;
+ }
+ else
+ {
+ IOMemoryDescriptorMapAllocRef ref;
+ ref.map = map;
+ ref.tag = tag;
+ ref.options = options;
+ ref.size = size;
+ ref.prot = prot;
+ if (options & kIOMapAnywhere)
+ // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE is set
+ ref.mapped = 0;
+ else
+ ref.mapped = addr;
+ if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
+ err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
+ else
+ err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
+ if (KERN_SUCCESS == err)
+ {
+ addr = ref.mapped;
+ map = ref.map;
+ didAlloc = true;
+ }
+ }
+
+ /*
+ * Prefaulting is only possible if we wired the memory earlier. Check the
+ * memory type and the underlying data.
+ */
+ if (options & kIOMapPrefault)
+ {
+ /*
+ * The memory must have been wired by calling ::prepare(), otherwise
+ * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
+ */
+ assert(map != kernel_map);
+ assert(_wireCount != 0);
+ assert(_memoryEntries != NULL);
+ if ((map == kernel_map) ||
+ (_wireCount == 0) ||
+ (_memoryEntries == NULL))
+ {
+ return kIOReturnBadArgument;
+ }
+
+ // Get the page list.
+ ioGMDData* dataP = getDataP(_memoryEntries);
+ ioPLBlock const* ioplList = getIOPLList(dataP);
+ pageList = getPageList(dataP);
+
+ // Get the number of IOPLs.
+ UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
+
+ /*
+ * Scan through the IOPL Info Blocks, looking for the first block containing
+ * the offset. The search will go past it, so we'll need to go back to the
+ * right range at the end.
+ */
+ UInt ioplIndex = 0;
+ while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
+ ioplIndex++;
+ ioplIndex--;
+
+ // Retrieve the IOPL info block.
+ ioPLBlock ioplInfo = ioplList[ioplIndex];
+
+ /*
+ * For external UPLs, fPageInfo points directly to the UPL's page_info_t
+ * array.
+ */
+ if (ioplInfo.fFlags & kIOPLExternUPL)
+ pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
+ else
+ pageList = &pageList[ioplInfo.fPageInfo];
+
+ // Rebase [offset] into the IOPL in order to look up the first page index.
+ mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
+
+ // Retrieve the index of the first page corresponding to the offset.
+ currentPageIndex = atop_32(offsetInIOPL);
+ }
+
+ // enter mappings
+ remain = size;
+ mapAddr = addr;
+ addr += pageOffset;
+
+ while (remain && (KERN_SUCCESS == err))
+ {
+ entryOffset = offset - entry->offset;
+ if ((page_mask & entryOffset) != pageOffset)
+ {
+ err = kIOReturnNotAligned;
+ break;
+ }
+
+ if (kIODefaultCache != cacheMode)
+ {
+ vm_size_t unused = 0;
+ err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
+ memEntryCacheMode, NULL, entry->entry);
+ assert (KERN_SUCCESS == err);
+ }
+
+ entryOffset -= pageOffset;
+ if (entryOffset >= entry->size) panic("entryOffset");
+ chunk = entry->size - entryOffset;
+ if (chunk)
+ {
+ if (chunk > remain) chunk = remain;
+ if (options & kIOMapPrefault)
+ {
+ UInt nb_pages = round_page(chunk) / PAGE_SIZE;
+ err = vm_map_enter_mem_object_prefault(map,
+ &mapAddr,
+ chunk, 0 /* mask */,
+ (VM_FLAGS_FIXED
+ | VM_FLAGS_OVERWRITE
+ | VM_MAKE_TAG(tag)
+ | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
+ entry->entry,
+ entryOffset,
+ prot, // cur
+ prot, // max
+ &pageList[currentPageIndex],
+ nb_pages);
+
+ // Compute the next index in the page list.
+ currentPageIndex += nb_pages;
+ assert(currentPageIndex <= _pages);
+ }
+ else
+ {
+ err = vm_map_enter_mem_object(map,
+ &mapAddr,
+ chunk, 0 /* mask */,
+ (VM_FLAGS_FIXED
+ | VM_FLAGS_OVERWRITE
+ | VM_MAKE_TAG(tag)
+ | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
+ entry->entry,
+ entryOffset,
+ false, // copy
+ prot, // cur
+ prot, // max
+ VM_INHERIT_NONE);
+ }
+ if (KERN_SUCCESS != err) break;
+ remain -= chunk;
+ if (!remain) break;
+ mapAddr += chunk;
+ offset += chunk - pageOffset;
+ }
+ pageOffset = 0;
+ entry++;
+ entryIdx++;
+ if (entryIdx >= ref->count)
+ {
+ err = kIOReturnOverrun;
+ break;
+ }
+ }
+
+ if ((KERN_SUCCESS != err) && didAlloc)
+ {
+ (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
+ addr = 0;
+ }
+ *inaddr = addr;
+
+ return (err);
+}
+
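+// Sum the resident and dirty page counts over all named entries.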
+IOReturn
+IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
+ IOMemoryReference * ref,
+ IOByteCount * residentPageCount,
+ IOByteCount * dirtyPageCount)
+{
+ IOReturn err;
+ IOMemoryEntry * entries;
+ unsigned int resident, dirty;
+ unsigned int totalResident, totalDirty;
+
+ totalResident = totalDirty = 0;
+ err = kIOReturnSuccess;
+ entries = ref->entries + ref->count;
+ while (entries > &ref->entries[0])
+ {
+ entries--;
+ err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
+ if (KERN_SUCCESS != err) break;
+ totalResident += resident;
+ totalDirty += dirty;
+ }
+
+ if (residentPageCount) *residentPageCount = totalResident;
+ if (dirtyPageCount) *dirtyPageCount = totalDirty;
+ return (err);
+}
+
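+// Apply a purgeable state change to each named entry and fold the returned
+// (previous) states into a single value, preferring empty over volatile over
+// nonvolatile.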
+IOReturn
+IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
+ IOMemoryReference * ref,
+ IOOptionBits newState,
+ IOOptionBits * oldState)
+{
+ IOReturn err;
+ IOMemoryEntry * entries;
+ vm_purgable_t control;
+ int totalState, state;
+
+ totalState = kIOMemoryPurgeableNonVolatile;
+ err = kIOReturnSuccess;
+ entries = ref->entries + ref->count;
+ while (entries > &ref->entries[0])
+ {
+ entries--;
+
+ err = purgeableControlBits(newState, &control, &state);
+ if (KERN_SUCCESS != err) break;
+ err = mach_memory_entry_purgable_control(entries->entry, control, &state);
+ if (KERN_SUCCESS != err) break;
+ err = purgeableStateBits(&state);
+ if (KERN_SUCCESS != err) break;
+
+ if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty;
+ else if (kIOMemoryPurgeableEmpty == totalState) continue;
+ else if (kIOMemoryPurgeableVolatile == totalState) continue;
+ else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile;
+ else totalState = kIOMemoryPurgeableNonVolatile;
+ }
+
+ if (oldState) *oldState = totalState;
+ return (err);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+