X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/8f6c56a50524aa785f7e596d52dddfb331e18961..c6bf4f310a33a9262d455ea4d3f0630b1255e3fe:/iokit/Kernel/IOMultiMemoryDescriptor.cpp?ds=inline

diff --git a/iokit/Kernel/IOMultiMemoryDescriptor.cpp b/iokit/Kernel/IOMultiMemoryDescriptor.cpp
index 224a6bdba..d70531265 100644
--- a/iokit/Kernel/IOMultiMemoryDescriptor.cpp
+++ b/iokit/Kernel/IOMultiMemoryDescriptor.cpp
@@ -2,7 +2,7 @@
  * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
@@ -32,380 +32,441 @@
 #define super IOMemoryDescriptor
 OSDefineMetaClassAndStructors(IOMultiMemoryDescriptor, IOMemoryDescriptor)
 
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-bool IOMultiMemoryDescriptor::initWithAddress(
-                                  void *      /* address       */ ,
-                                  IOByteCount /* withLength    */ ,
-                                  IODirection /* withDirection */ )
+IOMultiMemoryDescriptor * IOMultiMemoryDescriptor::withDescriptors(
+	IOMemoryDescriptor * *descriptors,
+	UInt32                withCount,
+	IODirection           withDirection,
+	bool                  asReference )
 {
-    return false;
+	//
+	// Create a new IOMultiMemoryDescriptor.  The "buffer" is made up of several
+	// memory descriptors, that are to be chained end-to-end to make up a single
+	// memory descriptor.
+	//
+	// Passing the ranges as a reference will avoid an extra allocation.
+	//
+
+	IOMultiMemoryDescriptor * me = new IOMultiMemoryDescriptor;
+
+	if (me && me->initWithDescriptors(
+		    /* descriptors   */ descriptors,
+		    /* withCount     */ withCount,
+		    /* withDirection */ withDirection,
+		    /* asReference   */ asReference ) == false) {
+		me->release();
+		me = NULL;
+	}
+
+	return me;
 }
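
For context, a minimal caller-side sketch of the factory above, assuming a hypothetical driver that chains two pageable buffers (bufA, bufB and the sizes are illustrative, not taken from this diff):

	#include <IOKit/IOBufferMemoryDescriptor.h>
	#include <IOKit/IOMultiMemoryDescriptor.h>

	// Two sub-buffers; error handling is omitted for brevity.
	IOBufferMemoryDescriptor * bufA = IOBufferMemoryDescriptor::withCapacity(
		PAGE_SIZE, kIODirectionOutIn);
	IOBufferMemoryDescriptor * bufB = IOBufferMemoryDescriptor::withCapacity(
		2 * PAGE_SIZE, kIODirectionOutIn);

	IOMemoryDescriptor * parts[2] = { bufA, bufB };

	// asReference == false, so the array of pointers is copied; the elements
	// are retained either way, so the caller keeps only its own references.
	IOMultiMemoryDescriptor * multi = IOMultiMemoryDescriptor::withDescriptors(
		parts, 2, kIODirectionOutIn, false);

	// multi->getLength() is now 3 * PAGE_SIZE, the sum of the two parts.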
 
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-bool IOMultiMemoryDescriptor::initWithAddress(
-                                  vm_address_t /* address       */ ,
-                                  IOByteCount  /* withLength    */ ,
-                                  IODirection  /* withDirection */ ,
-                                  task_t       /* withTask      */ )
+bool
+IOMultiMemoryDescriptor::initWithDescriptors(
+	IOMemoryDescriptor ** descriptors,
+	UInt32                withCount,
+	IODirection           withDirection,
+	bool                  asReference )
 {
-    return false;
+	unsigned index;
+	IOOptionBits copyFlags;
+	//
+	// Initialize an IOMultiMemoryDescriptor. The "buffer" is made up of several
+	// memory descriptors, that are to be chained end-to-end to make up a single
+	// memory descriptor.
+	//
+	// Passing the ranges as a reference will avoid an extra allocation.
+	//
+
+	assert(descriptors);
+
+	// Release existing descriptors, if any
+	if (_descriptors) {
+		for (unsigned index = 0; index < _descriptorsCount; index++) {
+			_descriptors[index]->release();
+		}
+
+		if (_descriptorsIsAllocated) {
+			IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
+		}
+	} else {
+		// Ask our superclass' opinion.
+		if (super::init() == false) {
+			return false;
+		}
+	}
+
+	// Initialize our minimal state.
+
+	_descriptors            = NULL;
+	_descriptorsCount       = withCount;
+	_descriptorsIsAllocated = asReference ? false : true;
+	_flags                  = withDirection;
+#ifndef __LP64__
+	_direction              = (IODirection) (_flags & kIOMemoryDirectionMask);
+#endif /* !__LP64__ */
+	_length                 = 0;
+	_mappings               = NULL;
+	_tag                    = 0;
+
+	if (asReference) {
+		_descriptors = descriptors;
+	} else {
+		_descriptors = IONew(IOMemoryDescriptor *, withCount);
+		if (_descriptors == NULL) {
+			return false;
+		}
+
+		bcopy( /* from  */ descriptors,
+		    /* to    */ _descriptors,
+		    /* bytes */ withCount * sizeof(IOMemoryDescriptor *));
+	}
+
+	for (index = 0; index < withCount; index++) {
+		descriptors[index]->retain();
+		_length += descriptors[index]->getLength();
+		if (_tag == 0) {
+			_tag = descriptors[index]->getTag();
+		}
+		assert(descriptors[index]->getDirection() ==
+		    (withDirection & kIOMemoryDirectionMask));
+	}
+
+	enum { kCopyFlags = kIOMemoryBufferPageable };
+	copyFlags = 0;
+	for (index = 0; index < withCount; index++) {
+		if (!index) {
+			copyFlags =  (kCopyFlags & descriptors[index]->_flags);
+		} else if (copyFlags != (kCopyFlags & descriptors[index]->_flags)) {
+			break;
+		}
+	}
+	if (index < withCount) {
+		return false;
+	}
+	_flags |= copyFlags;
+
+	return true;
 }
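
The "passing the ranges as a reference" remark above is about the descriptor array itself, not the memory it describes. A hedged sketch of the two cases, reusing bufA and bufB from the sketch above (again illustrative only):

	// If the pointer array is long-lived (say, instance data of the driver
	// object, so it outlives the multi-descriptor), it can be used in place
	// and the IONew() copy is skipped:
	IOMemoryDescriptor * fParts[2] = { bufA, bufB };   // imagine: member array
	IOMultiMemoryDescriptor * byRef = IOMultiMemoryDescriptor::withDescriptors(
		fParts, 2, kIODirectionOutIn, true /* asReference */);

	// With asReference == false the pointer array is copied (and freed again
	// in free()), so even a short-lived local array is safe:
	IOMemoryDescriptor * localParts[2] = { bufA, bufB };
	IOMultiMemoryDescriptor * byCopy = IOMultiMemoryDescriptor::withDescriptors(
		localParts, 2, kIODirectionOutIn, false /* copy the array */);

	// In either case every sub-descriptor is retained by initWithDescriptors().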
 
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-bool IOMultiMemoryDescriptor::initWithPhysicalAddress(
-                                  IOPhysicalAddress /* address       */ ,
-                                  IOByteCount       /* withLength    */ ,
-                                  IODirection       /* withDirection */ )
+void
+IOMultiMemoryDescriptor::free()
 {
-    return false;
-}
+	//
+	// Free all of this object's outstanding resources.
+	//
 
+	if (_descriptors) {
+		for (unsigned index = 0; index < _descriptorsCount; index++) {
+			_descriptors[index]->release();
+		}
 
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+		if (_descriptorsIsAllocated) {
+			IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
+		}
+	}
 
-bool IOMultiMemoryDescriptor::initWithPhysicalRanges(
-                                  IOPhysicalRange * /* ranges        */ ,
-                                  UInt32            /* withCount     */ ,
-                                  IODirection       /* withDirection */ ,
-                                  bool              /* asReference   */ )
-{
-    return false;
+	super::free();
 }
 
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-bool IOMultiMemoryDescriptor::initWithRanges(
-                                  IOVirtualRange * /* ranges        */ ,
-                                  UInt32           /* withCount     */ ,
-                                  IODirection      /* withDirection */ ,
-                                  task_t           /* withTask      */ ,
-                                  bool             /* asReference   */ )
+IOReturn
+IOMultiMemoryDescriptor::prepare(IODirection forDirection)
 {
-    return false;
+	//
+	// Prepare the memory for an I/O transfer.
+	//
+	// This involves paging in the memory and wiring it down for the duration
+	// of the transfer.  The complete() method finishes the processing of the
+	// memory after the I/O transfer finishes.
+	//
+
+	unsigned index;
+	IOReturn status = kIOReturnInternalError;
+	IOReturn statusUndo;
+
+	if (forDirection == kIODirectionNone) {
+		forDirection = getDirection();
+	}
+
+	for (index = 0; index < _descriptorsCount; index++) {
+		status = _descriptors[index]->prepare(forDirection);
+		if (status != kIOReturnSuccess) {
+			break;
+		}
+	}
+
+	if (status != kIOReturnSuccess) {
+		for (unsigned indexUndo = 0; indexUndo < index; indexUndo++) {
+			statusUndo = _descriptors[indexUndo]->complete(forDirection);
+			assert(statusUndo == kIOReturnSuccess);
+		}
+	}
+
+	return status;
 }
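
The error path above completes every sub-descriptor that had already been prepared, so a failed prepare() leaves nothing wired. A minimal caller-side sketch (multi is the hypothetical object from the sketches above):

	IOReturn st = multi->prepare();     // kIODirectionNone: uses getDirection()
	if (st != kIOReturnSuccess) {
		// No unwinding needed here; prepare() already undid the partial work.
		return st;
	}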
 
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-IOMultiMemoryDescriptor * IOMultiMemoryDescriptor::withDescriptors(
-                                  IOMemoryDescriptor ** descriptors,
-                                  UInt32                withCount,
-                                  IODirection           withDirection,
-                                  bool                  asReference )
+IOReturn
+IOMultiMemoryDescriptor::complete(IODirection forDirection)
 {
-    //
-    // Create a new IOMultiMemoryDescriptor.  The "buffer" is made up of several
-    // memory descriptors, that are to be chained end-to-end to make up a single
-    // memory descriptor.
-    //
-    // Passing the ranges as a reference will avoid an extra allocation.
-    //
-
-    IOMultiMemoryDescriptor * me = new IOMultiMemoryDescriptor;
-    
-    if ( me && me->initWithDescriptors(
-                                  /* descriptors   */ descriptors,
-                                  /* withCount     */ withCount,
-                                  /* withDirection */ withDirection,
-                                  /* asReference   */ asReference ) == false )
-    {
-	    me->release();
-	    me = 0;
-    }
-
-    return me;
+	//
+	// Complete processing of the memory after an I/O transfer finishes.
+	//
+	// This method shouldn't be called unless a prepare() was previously issued;
+	// the prepare() and complete() must occur in pairs, before and after an I/O
+	// transfer.
+	//
+
+	IOReturn status;
+	IOReturn statusFinal = kIOReturnSuccess;
+
+	if (forDirection == kIODirectionNone) {
+		forDirection = getDirection();
+	}
+
+	for (unsigned index = 0; index < _descriptorsCount; index++) {
+		status = _descriptors[index]->complete(forDirection);
+		if (status != kIOReturnSuccess) {
+			statusFinal = status;
+		}
+		assert(status == kIOReturnSuccess);
+	}
+
+	return statusFinal;
 }
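
Since prepare() and complete() must be issued in pairs, a typical transfer bracket looks roughly like the following (performDMA is a hypothetical stand-in for whatever actually consumes the descriptor):

	extern IOReturn performDMA(IOMemoryDescriptor * md);   // hypothetical

	if (multi->prepare() == kIOReturnSuccess) {
		(void) performDMA(multi);   // memory stays resident and wired here
		multi->complete();          // balances the successful prepare()
	}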
 
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-bool IOMultiMemoryDescriptor::initWithDescriptors(
-                                  IOMemoryDescriptor ** descriptors,
-                                  UInt32                withCount,
-                                  IODirection           withDirection,
-                                  bool                  asReference )
+addr64_t
+IOMultiMemoryDescriptor::getPhysicalSegment(IOByteCount   offset,
+    IOByteCount * length,
+    IOOptionBits  options)
 {
-    //
-    // Initialize an IOMultiMemoryDescriptor. The "buffer" is made up of several
-    // memory descriptors, that are to be chained end-to-end to make up a single
-    // memory descriptor.
-    //
-    // Passing the ranges as a reference will avoid an extra allocation.
-    //
-
-    assert(descriptors);
-    assert(withCount);
-
-    // Release existing descriptors, if any
-    if ( _descriptors )
-    {
-        for ( unsigned index = 0; index < _descriptorsCount; index++ ) 
-            _descriptors[index]->release();
-
-        if ( _descriptorsIsAllocated )
-            IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
-    } else {
-        // Ask our superclass' opinion.
-        if ( super::init() == false )  return false;
-    }
-    
-    // Initialize our minimal state.
-
-    _descriptors            = 0;
-    _descriptorsCount       = withCount;
-    _descriptorsIsAllocated = asReference ? false : true;
-    _direction              = withDirection;
-    _length                 = 0;
-    _mappings               = 0;
-    _tag                    = 0;
-
-    if ( asReference )
-    {
-        _descriptors = descriptors;
-    }
-    else
-    {
-        _descriptors = IONew(IOMemoryDescriptor *, withCount);
-        if ( _descriptors == 0 )  return false;
-
-        bcopy( /* from  */ descriptors,
-               /* to    */ _descriptors,
-               /* bytes */ withCount * sizeof(IOMemoryDescriptor *) );
-    }
-
-    for ( unsigned index = 0; index < withCount; index++ ) 
-    {
-        descriptors[index]->retain();
-        _length += descriptors[index]->getLength();
-        if ( _tag == 0 )  _tag = descriptors[index]->getTag();
-        assert(descriptors[index]->getDirection() == withDirection);
-    }
-
-    return true;
+	//
+	// This method returns the physical address of the byte at the given offset
+	// into the memory,  and optionally the length of the physically contiguous
+	// segment from that offset.
+	//
+
+	assert(offset <= _length);
+
+	for (unsigned index = 0; index < _descriptorsCount; index++) {
+		if (offset < _descriptors[index]->getLength()) {
+			return _descriptors[index]->getPhysicalSegment(offset, length, options);
+		}
+		offset -= _descriptors[index]->getLength();
+	}
+
+	if (length) {
+		*length = 0;
+	}
+
+	return 0;
 }
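
Because consecutive sub-descriptors are generally not physically adjacent, callers walk the chained range one contiguous segment at a time. A rough sketch of that walk, assuming prepare() has already succeeded (real drivers would usually let IODMACommand do this instead):

	IOByteCount offset = 0;
	while (offset < multi->getLength()) {
		IOByteCount segLength = 0;
		addr64_t    segPhys   = multi->getPhysicalSegment(offset, &segLength,
		    kIOMemoryMapperNone);
		if (!segPhys || !segLength) {
			break;              // no further physically contiguous segment
		}
		// program one scatter/gather element covering (segPhys, segLength)
		offset += segLength;
	}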
 
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+#include "IOKitKernelInternal.h"
 
-void IOMultiMemoryDescriptor::free()
+IOReturn
+IOMultiMemoryDescriptor::doMap(vm_map_t           __addressMap,
+    IOVirtualAddress *  __address,
+    IOOptionBits       options,
+    IOByteCount        __offset,
+    IOByteCount        __length)
 {
-    //
-    // Free all of this object's outstanding resources.
-    //
-
-    if ( _descriptors )
-    {
-        for ( unsigned index = 0; index < _descriptorsCount; index++ ) 
-            _descriptors[index]->release();
-
-        if ( _descriptorsIsAllocated )
-            IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
-    }
-
-    super::free();
+	IOMemoryMap *     mapping = (IOMemoryMap *) *__address;
+	vm_map_t          map     = mapping->fAddressMap;
+	mach_vm_size_t    offset  = mapping->fOffset;
+	mach_vm_size_t    length  = mapping->fLength;
+	mach_vm_address_t address = mapping->fAddress;
+
+	kern_return_t     err;
+	IOOptionBits      subOptions;
+	mach_vm_size_t    mapOffset;
+	mach_vm_size_t    bytesRemaining, chunk;
+	mach_vm_address_t nextAddress;
+	IOMemoryDescriptorMapAllocRef ref;
+	vm_prot_t                     prot;
+
+	do{
+		prot = VM_PROT_READ;
+		if (!(kIOMapReadOnly & options)) {
+			prot |= VM_PROT_WRITE;
+		}
+
+		if (kIOMapOverwrite & options) {
+			if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
+				map = IOPageableMapForAddress(address);
+			}
+			err = KERN_SUCCESS;
+		} else {
+			ref.map     = map;
+			ref.tag     = IOMemoryTag(map);
+			ref.options = options;
+			ref.size    = length;
+			ref.prot    = prot;
+			if (options & kIOMapAnywhere) {
+				// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
+				ref.mapped = 0;
+			} else {
+				ref.mapped = mapping->fAddress;
+			}
+
+			if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
+				err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
+			} else {
+				err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
+			}
+
+			if (KERN_SUCCESS != err) {
+				break;
+			}
+
+			address = ref.mapped;
+			mapping->fAddress = address;
+		}
+
+		mapOffset = offset;
+		bytesRemaining = length;
+		nextAddress = address;
+		assert(mapOffset <= _length);
+		subOptions = (options & ~kIOMapAnywhere) | kIOMapOverwrite;
+
+		for (unsigned index = 0; bytesRemaining && (index < _descriptorsCount); index++) {
+			chunk = _descriptors[index]->getLength();
+			if (mapOffset >= chunk) {
+				mapOffset -= chunk;
+				continue;
+			}
+			chunk -= mapOffset;
+			if (chunk > bytesRemaining) {
+				chunk = bytesRemaining;
+			}
+			IOMemoryMap * subMap;
+			subMap = _descriptors[index]->createMappingInTask(mapping->fAddressTask, nextAddress, subOptions, mapOffset, chunk );
+			if (!subMap) {
+				break;
+			}
+			subMap->release(); // kIOMapOverwrite means it will not deallocate
+
+			bytesRemaining -= chunk;
+			nextAddress += chunk;
+			mapOffset = 0;
+		}
+		if (bytesRemaining) {
+			err = kIOReturnUnderrun;
+		}
+	}while (false);
+
+	if (kIOReturnSuccess == err) {
+#if IOTRACKING
+		IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
+#endif
+	}
+
+	return err;
 }
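
doMap() above reserves a single virtual range and then maps each sub-descriptor into consecutive slices of it with kIOMapOverwrite; the per-slice IOMemoryMap objects can be released immediately because, as the comment notes, the overwrite mapping is not deallocated with them. From the caller's side this is all behind the normal mapping API; a hedged sketch:

	IOMemoryMap * map = multi->createMappingInTask(kernel_task,
	    0, kIOMapAnywhere);                 // let the kernel pick the address
	if (map != NULL) {
		IOVirtualAddress va = map->getVirtualAddress();
		// [va, va + map->getLength()) now covers all sub-descriptors laid out
		// back to back, in the order they were passed to withDescriptors().
		// ... use the mapping ...
		map->release();                     // tears down the whole range
	}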
 
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-IOReturn IOMultiMemoryDescriptor::prepare(IODirection forDirection)
+IOReturn
+IOMultiMemoryDescriptor::setPurgeable( IOOptionBits newState,
+    IOOptionBits * oldState )
 {
-    //
-    // Prepare the memory for an I/O transfer.
-    //
-    // This involves paging in the memory and wiring it down for the duration
-    // of the transfer.  The complete() method finishes the processing of the
-    // memory after the I/O transfer finishes.
-    //
-
-    unsigned index;
-    IOReturn status = kIOReturnInternalError;
-    IOReturn statusUndo;
-
-    if ( forDirection == kIODirectionNone )
-    {
-        forDirection = _direction;
-    }
-
-    for ( index = 0; index < _descriptorsCount; index++ ) 
-    {
-        status = _descriptors[index]->prepare(forDirection);
-        if ( status != kIOReturnSuccess )  break;
-    }
-
-    if ( status != kIOReturnSuccess )
-    {
-        for ( unsigned indexUndo = 0; indexUndo <= index; indexUndo++ )
-        {
-            statusUndo = _descriptors[index]->complete(forDirection);
-            assert(statusUndo == kIOReturnSuccess);
-        }
-    }
-
-    return status;
+	IOReturn     err;
+	IOOptionBits totalState, state;
+
+	totalState = kIOMemoryPurgeableNonVolatile;
+	err = kIOReturnSuccess;
+	for (unsigned index = 0; index < _descriptorsCount; index++) {
+		err = _descriptors[index]->setPurgeable(newState, &state);
+		if (kIOReturnSuccess != err) {
+			break;
+		}
+
+		if (kIOMemoryPurgeableEmpty == state) {
+			totalState = kIOMemoryPurgeableEmpty;
+		} else if (kIOMemoryPurgeableEmpty == totalState) {
+			continue;
+		} else if (kIOMemoryPurgeableVolatile == totalState) {
+			continue;
+		} else if (kIOMemoryPurgeableVolatile == state) {
+			totalState = kIOMemoryPurgeableVolatile;
+		} else {
+			totalState = kIOMemoryPurgeableNonVolatile;
+		}
+	}
+	if (oldState) {
+		*oldState = totalState;
+	}
+
+	return err;
 }
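
setPurgeable() above applies the new state to every sub-descriptor and folds the previous states into one value, with "empty" taking precedence over "volatile" and "volatile" over "non-volatile". A hedged sketch of the usual cycle:

	IOOptionBits oldState = 0;

	// Let the system reclaim the chained buffer while it is idle.
	(void) multi->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);

	// ... later, before the contents are needed again ...
	if ((multi->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState)
	    == kIOReturnSuccess) && (oldState == kIOMemoryPurgeableEmpty)) {
		// At least one sub-descriptor was reclaimed; regenerate the data.
	}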
 
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-IOReturn IOMultiMemoryDescriptor::complete(IODirection forDirection)
+IOReturn
+IOMultiMemoryDescriptor::setOwnership( task_t newOwner,
+    int newLedgerTag,
+    IOOptionBits newLedgerOptions )
 {
-    //
-    // Complete processing of the memory after an I/O transfer finishes.
-    //
-    // This method shouldn't be called unless a prepare() was previously issued;
-    // the prepare() and complete() must occur in pairs, before and after an I/O
-    // transfer.
-    //
-
-    IOReturn status;
-    IOReturn statusFinal = kIOReturnSuccess;
-
-    if ( forDirection == kIODirectionNone )
-    {
-        forDirection = _direction;
-    }
-
-    for ( unsigned index = 0; index < _descriptorsCount; index++ ) 
-    {
-        status = _descriptors[index]->complete(forDirection);
-        if ( status != kIOReturnSuccess )  statusFinal = status;
-        assert(status == kIOReturnSuccess);
-    }
-
-    return statusFinal;
-}
+	IOReturn     err;
 
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+	if (iokit_iomd_setownership_enabled == FALSE) {
+		return kIOReturnUnsupported;
+	}
 
-IOPhysicalAddress IOMultiMemoryDescriptor::getPhysicalSegment(
-                                                       IOByteCount   offset,
-                                                       IOByteCount * length )
-{
-    //
-    // This method returns the physical address of the byte at the given offset
-    // into the memory,  and optionally the length of the physically contiguous
-    // segment from that offset.
-    //
-
-    assert(offset <= _length);
-
-    for ( unsigned index = 0; index < _descriptorsCount; index++ ) 
-    {
-        if ( offset < _descriptors[index]->getLength() )
-        {
-            return _descriptors[index]->getPhysicalSegment(offset, length);
-        }
-        offset -= _descriptors[index]->getLength();
-    }
-
-    if ( length )  *length = 0;
-
-    return 0;
-}
+	err = kIOReturnSuccess;
+	for (unsigned index = 0; index < _descriptorsCount; index++) {
+		err = _descriptors[index]->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
+		if (kIOReturnSuccess != err) {
+			break;
+		}
+	}
 
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+	return err;
+}
 
-IOPhysicalAddress IOMultiMemoryDescriptor::getSourceSegment(
-                                                       IOByteCount   offset,
-                                                       IOByteCount * length )
+IOReturn
+IOMultiMemoryDescriptor::getPageCounts(IOByteCount * pResidentPageCount,
+    IOByteCount * pDirtyPageCount)
 {
-    //
-    // This method returns the physical address of the byte at the given offset
-    // into the memory,  and optionally the length of the physically contiguous
-    // segment from that offset.
-    //
-
-    assert(offset <= _length);
-
-    for ( unsigned index = 0; index < _descriptorsCount; index++ ) 
-    {
-        if ( offset < _descriptors[index]->getLength() )
-        {
-            return _descriptors[index]->getSourceSegment(offset, length);
-        }
-        offset -= _descriptors[index]->getLength();
-    }
-
-    if ( length )  *length = 0;
-
-    return 0;
+	IOReturn    err;
+	IOByteCount totalResidentPageCount, totalDirtyPageCount;
+	IOByteCount residentPageCount, dirtyPageCount;
+
+	err = kIOReturnSuccess;
+	totalResidentPageCount = totalDirtyPageCount = 0;
+	for (unsigned index = 0; index < _descriptorsCount; index++) {
+		err = _descriptors[index]->getPageCounts(&residentPageCount, &dirtyPageCount);
+		if (kIOReturnSuccess != err) {
+			break;
+		}
+		totalResidentPageCount += residentPageCount;
+		totalDirtyPageCount    += dirtyPageCount;
+	}
+
+	if (pResidentPageCount) {
+		*pResidentPageCount = totalResidentPageCount;
+	}
+	if (pDirtyPageCount) {
+		*pDirtyPageCount = totalDirtyPageCount;
+	}
+
+	return err;
 }
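
getPageCounts() simply sums the per-descriptor counts; either out-parameter may be NULL if only one total is wanted. A minimal sketch:

	IOByteCount residentPages = 0, dirtyPages = 0;
	if (multi->getPageCounts(&residentPages, &dirtyPages) == kIOReturnSuccess) {
		// Totals are page counts accumulated across every sub-descriptor.
	}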
 
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-void * IOMultiMemoryDescriptor::getVirtualSegment( IOByteCount   /* offset */ ,
-                                                   IOByteCount * /* length */ )
+uint64_t
+IOMultiMemoryDescriptor::getPreparationID( void )
 {
-    return 0;
-}
+	if (!super::getKernelReserved()) {
+		return kIOPreparationIDUnsupported;
+	}
 
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+	for (unsigned index = 0; index < _descriptorsCount; index++) {
+		uint64_t preparationID = _descriptors[index]->getPreparationID();
 
-IOByteCount IOMultiMemoryDescriptor::readBytes( IOByteCount offset,
-                                                void *      bytes,
-                                                IOByteCount withLength )
-{
-    //
-    // Copies data from the memory descriptor's buffer at the given offset, to
-    // the specified buffer.  Returns the number of bytes copied.
-    //
-
-    IOByteCount bytesCopied = 0;
-    unsigned    index;
-
-    for ( index = 0; index < _descriptorsCount; index++ ) 
-    {
-        if ( offset < _descriptors[index]->getLength() )  break;
-        offset -= _descriptors[index]->getLength();
-    }
-
-    for ( ; index < _descriptorsCount && withLength; index++)
-    {
-        IOByteCount copy   = min(_descriptors[index]->getLength(), withLength);
-        IOByteCount copied = _descriptors[index]->readBytes(offset,bytes,copy);
-
-        bytesCopied += copied;
-        if ( copied != copy )  break;
-
-        bytes = ((UInt8 *) bytes) + copied;
-        withLength -= copied;
-        offset = 0;
-    }
-
-    return bytesCopied;
-}
+		if (preparationID == kIOPreparationIDUnsupported) {
+			return kIOPreparationIDUnsupported;
+		}
 
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+		if (preparationID == kIOPreparationIDUnprepared) {
+			return kIOPreparationIDUnprepared;
+		}
+	}
 
-IOByteCount IOMultiMemoryDescriptor::writeBytes( IOByteCount  offset,
-                                                 const void * bytes,
-                                                 IOByteCount  withLength )
-{
-    //
-    // Copies data to the memory descriptor's buffer at the given offset, from
-    // the specified buffer.  Returns the number of bytes copied.
-    //
-
-    IOByteCount bytesCopied = 0;
-    unsigned    index;
-
-    for ( index = 0; index < _descriptorsCount; index++ ) 
-    {
-        if ( offset < _descriptors[index]->getLength() )  break;
-        offset -= _descriptors[index]->getLength();
-    }
-
-    for ( ; index < _descriptorsCount && withLength; index++)
-    {
-        IOByteCount copy   = min(_descriptors[index]->getLength(), withLength);
-        IOByteCount copied = _descriptors[index]->writeBytes(offset,bytes,copy);
-
-        bytesCopied += copied;
-        if ( copied != copy )  break;
-
-        bytes = ((UInt8 *) bytes) + copied;
-        withLength -= copied;
-        offset = 0;
-    }
-
-    return bytesCopied;
+	super::setPreparationID();
+
+	return super::getPreparationID();
 }