+
+#include "IOKitKernelInternal.h"
+
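+// doMap() maps this multi-descriptor into the caller's address space. The
+// incoming IOMemoryMap (passed via *__address) supplies the target map,
+// offset, length, and address; the remaining double-underscore parameters
+// are unused here, following the IOMemoryDescriptor::doMap convention.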
+IOReturn IOMultiMemoryDescriptor::doMap(vm_map_t __addressMap,
+ IOVirtualAddress * __address,
+ IOOptionBits options,
+ IOByteCount __offset,
+ IOByteCount __length)
+{
+ IOMemoryMap * mapping = (IOMemoryMap *) *__address;
+ vm_map_t map = mapping->fAddressMap;
+ mach_vm_size_t offset = mapping->fOffset;
+ mach_vm_size_t length = mapping->fLength;
+ mach_vm_address_t address = mapping->fAddress;
+
+ kern_return_t err;
+ IOOptionBits subOptions;
+ mach_vm_size_t mapOffset;
+ mach_vm_size_t bytesRemaining, chunk;
+ mach_vm_address_t nextAddress;
+ IOMemoryDescriptorMapAllocRef ref;
+ vm_prot_t prot;
+
+ do
+ {
+ prot = VM_PROT_READ;
+ if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
+
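+ // Either reuse the range the caller already owns (kIOMapOverwrite), or
+ // allocate a fresh range in the target map to cover the whole mapping.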
+ if (kIOMapOverwrite & options)
+ {
+ if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
+ {
+ map = IOPageableMapForAddress(address);
+ }
+ err = KERN_SUCCESS;
+ }
+ else
+ {
+ ref.map = map;
+ ref.tag = IOMemoryTag(map);
+ ref.options = options;
+ ref.size = length;
+ ref.prot = prot;
+ if (options & kIOMapAnywhere)
+ // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
+ ref.mapped = 0;
+ else
+ ref.mapped = mapping->fAddress;
+
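+ // Pageable kernel buffers are allocated by iterating the kernel's
+ // pageable submaps; everything else allocates directly in the target map.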
+ if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
+ err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
+ else
+ err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
+
+ if (KERN_SUCCESS != err) break;
+
+ address = ref.mapped;
+ mapping->fAddress = address;
+ }
+
+ mapOffset = offset;
+ bytesRemaining = length;
+ nextAddress = address;
+ assert(mapOffset <= _length);
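+ // Sub-mappings must land at the exact addresses computed below, so force
+ // kIOMapOverwrite and strip kIOMapAnywhere before mapping each descriptor.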
+ subOptions = (options & ~kIOMapAnywhere) | kIOMapOverwrite;
+
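+ // Walk the sub-descriptors: skip whole descriptors until mapOffset falls
+ // inside one, then map each remaining chunk back-to-back at nextAddress.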
+ for (unsigned index = 0; bytesRemaining && (index < _descriptorsCount); index++)
+ {
+ chunk = _descriptors[index]->getLength();
+ if (mapOffset >= chunk)
+ {
+ mapOffset -= chunk;
+ continue;
+ }
+ chunk -= mapOffset;
+ if (chunk > bytesRemaining) chunk = bytesRemaining;
+ IOMemoryMap * subMap;
+ subMap = _descriptors[index]->createMappingInTask(mapping->fAddressTask, nextAddress, subOptions, mapOffset, chunk);
+ if (!subMap) break;
+ subMap->release(); // kIOMapOverwrite means it will not deallocate
+
+ bytesRemaining -= chunk;
+ nextAddress += chunk;
+ mapOffset = 0;
+ }
+ if (bytesRemaining) err = kIOReturnUnderrun; // ran out of descriptors before the requested length was covered
+ }
+ while (false);
+
+ if (kIOReturnSuccess == err)
+ {
+#if IOTRACKING
+ IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
+#endif
+ }
+
+ return (err);
+}
+
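+// setPurgeable() applies the new purgeable state to every sub-descriptor and
+// folds the individual results into one aggregate old state, with precedence
+// Empty > Volatile > NonVolatile.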
+IOReturn IOMultiMemoryDescriptor::setPurgeable( IOOptionBits newState,
+ IOOptionBits * oldState )
+{
+ IOReturn err;
+ IOOptionBits totalState, state;
+
+ totalState = kIOMemoryPurgeableNonVolatile;
+ err = kIOReturnSuccess;
+ for (unsigned index = 0; index < _descriptorsCount; index++)
+ {
+ err = _descriptors[index]->setPurgeable(newState, &state);
+ if (kIOReturnSuccess != err) break;
+
+ if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty;
+ else if (kIOMemoryPurgeableEmpty == totalState) continue;
+ else if (kIOMemoryPurgeableVolatile == totalState) continue;
+ else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile;
+ else totalState = kIOMemoryPurgeableNonVolatile;
+ }
+ if (oldState) *oldState = totalState;
+
+ return (err);
+}
+
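+// getPageCounts() totals the resident and dirty page counts reported by each
+// sub-descriptor.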
+IOReturn IOMultiMemoryDescriptor::getPageCounts(IOByteCount * pResidentPageCount,
+ IOByteCount * pDirtyPageCount)
+{
+ IOReturn err;
+ IOByteCount totalResidentPageCount, totalDirtyPageCount;
+ IOByteCount residentPageCount, dirtyPageCount;
+
+ err = kIOReturnSuccess;
+ totalResidentPageCount = totalDirtyPageCount = 0;
+ for (unsigned index = 0; index < _descriptorsCount; index++)
+ {
+ err = _descriptors[index]->getPageCounts(&residentPageCount, &dirtyPageCount);
+ if (kIOReturnSuccess != err) break;
+ totalResidentPageCount += residentPageCount;
+ totalDirtyPageCount += dirtyPageCount;
+ }
+
+ if (pResidentPageCount) *pResidentPageCount = totalResidentPageCount;
+ if (pDirtyPageCount) *pDirtyPageCount = totalDirtyPageCount;
+
+ return (err);
+}