+ address = getPhysicalSegment(offset, lengthOfSegment, 0);
+
+ if (lengthOfSegment)
+ length = *lengthOfSegment;
+
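+ // A 32-bit interface cannot represent physical addresses at or above
+ // 4GB, so panic rather than silently truncate the result.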
+ if ((address + length) > 0x100000000ULL)
+ {
+ panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
+ address, (long) length, (getMetaClass())->getClassName());
+ }
+
+ return ((IOPhysicalAddress) address);
+}
+
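+ /*
+ * 64-bit variant of getPhysicalSegment(). With a system mapper active
+ * the 32-bit result is an IOVM address, so translate it back through
+ * the mapper and coalesce physically contiguous pages into one segment.
+ */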
+addr64_t
+IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ IOPhysicalAddress phys32;
+ IOByteCount length;
+ addr64_t phys64;
+ IOMapper * mapper = 0;
+
+ phys32 = getPhysicalSegment(offset, lengthOfSegment);
+ if (!phys32)
+ return 0;
+
+ if (gIOSystemMapper)
+ mapper = gIOSystemMapper;
+
+ if (mapper)
+ {
+ IOByteCount origLen;
+
+ phys64 = mapper->mapAddr(phys32);
+ // lengthOfSegment may legitimately be NULL (the 32-bit call above
+ // tolerates it), so guard the dereference; fall back to the bytes
+ // remaining in the current page.
+ origLen = lengthOfSegment ? *lengthOfSegment : (page_size - (phys64 & (page_size - 1)));
+ // Start with the bytes left in the first page, then grow the segment
+ // a page at a time while the mapping stays physically contiguous.
+ length = page_size - (phys64 & (page_size - 1));
+ while ((length < origLen)
+ && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
+ length += page_size;
+ if (length > origLen)
+ length = origLen;
+
+ if (lengthOfSegment)
+ *lengthOfSegment = length;
+ }
+ else
+ phys64 = (addr64_t) phys32;
+
+ return phys64;
+}
+
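+ // 32-bit compatibility wrapper around the three-argument getPhysicalSegment().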
+IOPhysicalAddress
+IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
+}
+
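+ // Returns a segment of the original source ranges backing this descriptor.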
+IOPhysicalAddress
+IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
+}
+
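+ // Deprecated: only legal for kernel-task descriptors; panics for user tasks.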
+void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
+ IOByteCount * lengthOfSegment)
+{
+ if (_task == kernel_task)
+ return (void *) getSourceSegment(offset, lengthOfSegment);
+ else
+ panic("IOGMD::getVirtualSegment deprecated");
+
+ return 0; // not reached - panic() does not return
+}
+#endif /* !__LP64__ */
+
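+ /*
+ * Operation back end used by IODMACommand: kIOMDGetCharacteristics
+ * reports the descriptor's DMA characteristics; kIOMDWalkSegments
+ * returns one physical segment at the requested offset.
+ */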
+IOReturn
+IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
+{
+ if (kIOMDGetCharacteristics == op) {
+ if (dataSize < sizeof(IOMDDMACharacteristics))
+ return kIOReturnUnderrun;
+
+ IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
+ data->fLength = getLength();
+ data->fSGCount = 0;
+ data->fDirection = getDirection();
+ // Only set fIsMapped when a system mapper exists; otherwise leave the
+ // caller's value untouched.
+ if (IOMapper::gSystem)
+ data->fIsMapped = true;
+ data->fIsPrepared = true; // Assume prepared - the safe default when it cannot be determined
+ }
+ else if (kIOMDWalkSegments & op) {
+ if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
+ return kIOReturnUnderrun;
+
+ IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
+ IOByteCount offset = (IOByteCount) data->fOffset;
+
+ IOPhysicalLength length;
+ // getPhysicalSegment() is non-const; the descriptor is not modified.
+ IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
+ // Return mapper (IOVM) addresses while the system mapper covers this
+ // descriptor; otherwise bypass it and return raw physical addresses.
+ if (data->fMapped && IOMapper::gSystem)
+ data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
+ else
+ data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
+ data->fLength = length;
+ }
+ else
+ return kIOReturnBadArgument;
+
+ return kIOReturnSuccess;
+}
+
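+ // Translate an IOKit purgeable-state request into the control/state
+ // pair expected by mach_vm_purgable_control().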
+static IOReturn
+purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
+{
+ IOReturn err = kIOReturnSuccess;
+
+ *control = VM_PURGABLE_SET_STATE;
+ switch (newState)
+ {
+ case kIOMemoryPurgeableKeepCurrent:
+ // Query only - the GET operation returns the current state in *state.
+ *control = VM_PURGABLE_GET_STATE;
+ break;
+
+ case kIOMemoryPurgeableNonVolatile:
+ *state = VM_PURGABLE_NONVOLATILE;
+ break;
+ case kIOMemoryPurgeableVolatile:
+ *state = VM_PURGABLE_VOLATILE;
+ break;
+ case kIOMemoryPurgeableEmpty:
+ *state = VM_PURGABLE_EMPTY;
+ break;
+ default:
+ err = kIOReturnBadArgument;
+ break;
+ }
+ return (err);
+}
+
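+ // Translate a Mach purgeable state back into its IOKit equivalent.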
+static IOReturn
+purgeableStateBits(int * state)
+{
+ IOReturn err = kIOReturnSuccess;
+
+ switch (*state)
+ {
+ case VM_PURGABLE_NONVOLATILE:
+ *state = kIOMemoryPurgeableNonVolatile;
+ break;
+ case VM_PURGABLE_VOLATILE:
+ *state = kIOMemoryPurgeableVolatile;
+ break;
+ case VM_PURGABLE_EMPTY:
+ *state = kIOMemoryPurgeableEmpty;
+ break;
+ default:
+ // Unrecognized Mach state; report non-volatile and flag the mismatch.
+ *state = kIOMemoryPurgeableNonVolatile;
+ err = kIOReturnNotReady;
+ break;
+ }
+ return (err);
+}
+
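+ /*
+ * Set (or, with kIOMemoryPurgeableKeepCurrent, query) the purgeable
+ * state of the memory backing this descriptor. With a memory entry the
+ * superclass handles the request; otherwise it is applied directly to
+ * the owning task's vm_map.
+ */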
+IOReturn
+IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
+ IOOptionBits * oldState )
+{
+ IOReturn err = kIOReturnSuccess;
+ vm_purgable_t control;
+ int state;
+
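+ // A memory entry exists - let the superclass act on it directly.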
+ if (_memEntry)
+ {
+ err = super::setPurgeable(newState, oldState);
+ }
+ else
+ {
+ if (kIOMemoryThreadSafe & _flags)
+ LOCK;
+ do
+ {
+ // Find the appropriate vm_map for the given task
+ vm_map_t curMap;
+ if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
+ {
+ // Pageable kernel buffers do not live in the kernel map proper,
+ // so they cannot be controlled through get_task_map().
+ err = kIOReturnNotReady;
+ break;
+ }
+ else
+ curMap = get_task_map(_task);
+
+ // Purgeable control applies to a single range; only the first is used.
+ Ranges vec = _ranges;
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+ user_addr_t addr;
+ IOByteCount len;
+ getAddrLenForInd(addr, len, type, vec, 0);
+
+ err = purgeableControlBits(newState, &control, &state);
+ if (kIOReturnSuccess != err)
+ break;
+ // Apply (or query) the purgeable state on the backing VM object.
+ err = mach_vm_purgable_control(curMap, addr, control, &state);
+ if (oldState)
+ {
+ if (kIOReturnSuccess == err)
+ {
+ // Translate the Mach state back into IOKit constants.
+ err = purgeableStateBits(&state);
+ *oldState = state;
+ }
+ }
+ }
+ while (false);
+ if (kIOMemoryThreadSafe & _flags)
+ UNLOCK;
+ }
+ return (err);
+}
+
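+ /*
+ * Usage sketch (hypothetical caller, illustrative only - 'md' is any
+ * prepared IOMemoryDescriptor): a driver can mark a cache volatile
+ * while idle, then reclaim it and check whether the pages survived:
+ *
+ * IOOptionBits oldState;
+ * md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
+ * // ... later ...
+ * IOReturn ret = md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
+ * if ((kIOReturnSuccess == ret) && (kIOMemoryPurgeableEmpty == oldState))
+ * ; // contents were discarded - the data must be regenerated
+ */
+
+ // Base-class implementation, operating on the descriptor's memory entry.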
+IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
+ IOOptionBits * oldState )
+{
+ IOReturn err = kIOReturnSuccess;
+ vm_purgable_t control;
+ int state;
+
+ if (kIOMemoryThreadSafe & _flags)
+ LOCK;