/*
* Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the
- * License may not be used to create, or enable the creation or
- * redistribution of, unlawful or unlicensed copies of an Apple operating
- * system, or to circumvent, violate, or enable the circumvention or
- * violation of, any terms of an Apple operating system software license
- * agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
* limitations under the License.
- *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include <IOKit/IOLib.h>
#define super IOMemoryDescriptor
OSDefineMetaClassAndStructors(IOMultiMemoryDescriptor, IOMemoryDescriptor)
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-bool IOMultiMemoryDescriptor::initWithAddress(
- void * /* address */ ,
- IOByteCount /* withLength */ ,
- IODirection /* withDirection */ )
-{
- return false;
-}
-
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-bool IOMultiMemoryDescriptor::initWithAddress(
- vm_address_t /* address */ ,
- IOByteCount /* withLength */ ,
- IODirection /* withDirection */ ,
- task_t /* withTask */ )
-{
- return false;
-}
-
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-bool IOMultiMemoryDescriptor::initWithPhysicalAddress(
- IOPhysicalAddress /* address */ ,
- IOByteCount /* withLength */ ,
- IODirection /* withDirection */ )
-{
- return false;
-}
-
-
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-bool IOMultiMemoryDescriptor::initWithPhysicalRanges(
- IOPhysicalRange * /* ranges */ ,
- UInt32 /* withCount */ ,
- IODirection /* withDirection */ ,
- bool /* asReference */ )
-{
- return false;
-}
-
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-bool IOMultiMemoryDescriptor::initWithRanges(
- IOVirtualRange * /* ranges */ ,
- UInt32 /* withCount */ ,
- IODirection /* withDirection */ ,
- task_t /* withTask */ ,
- bool /* asReference */ )
-{
- return false;
-}
-
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
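+// Usage sketch (illustrative only; mdA and mdB are hypothetical sub-descriptors
+// created elsewhere): chain two descriptors into one logical buffer, prepare it,
+// use it, then tear it down.
+//
+//     IOMemoryDescriptor *      parts[2] = { mdA, mdB };
+//     IOMultiMemoryDescriptor * multi =
+//         IOMultiMemoryDescriptor::withDescriptors(parts, 2, kIODirectionOutIn, false);
+//     if (multi && (kIOReturnSuccess == multi->prepare()))
+//     {
+//         // ... perform I/O against the combined ranges ...
+//         multi->complete();
+//     }
+//     if (multi) multi->release();
+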
IOMultiMemoryDescriptor * IOMultiMemoryDescriptor::withDescriptors(
IOMemoryDescriptor ** descriptors,
UInt32 withCount,
return me;
}
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
bool IOMultiMemoryDescriptor::initWithDescriptors(
IOMemoryDescriptor ** descriptors,
UInt32 withCount,
IODirection withDirection,
bool asReference )
{
+ unsigned index;
+ IOOptionBits copyFlags;
//
// Initialize an IOMultiMemoryDescriptor. The "buffer" is made up of several
// memory descriptors that are to be chained end-to-end to make up a single
//
assert(descriptors);
- assert(withCount);
// Release existing descriptors, if any
if ( _descriptors )
_descriptors = 0;
_descriptorsCount = withCount;
_descriptorsIsAllocated = asReference ? false : true;
- _direction = withDirection;
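+    // The direction now lives in _flags; the legacy _direction field is kept
+    // in sync only on non-LP64 kernels.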
+ _flags = withDirection;
+#ifndef __LP64__
+ _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
+#endif /* !__LP64__ */
_length = 0;
_mappings = 0;
_tag = 0;
/* bytes */ withCount * sizeof(IOMemoryDescriptor *) );
}
- for ( unsigned index = 0; index < withCount; index++ )
+ for ( index = 0; index < withCount; index++ )
{
descriptors[index]->retain();
_length += descriptors[index]->getLength();
if ( _tag == 0 ) _tag = descriptors[index]->getTag();
- assert(descriptors[index]->getDirection() == withDirection);
+ assert(descriptors[index]->getDirection() ==
+ (withDirection & kIOMemoryDirectionMask));
}
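+    // All sub-descriptors must agree on pageability; a mixed set is rejected,
+    // and the common kIOMemoryBufferPageable bit is inherited into _flags.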
+ enum { kCopyFlags = kIOMemoryBufferPageable };
+ copyFlags = 0;
+ for ( index = 0; index < withCount; index++ )
+ {
+ if (!index) copyFlags = (kCopyFlags & descriptors[index]->_flags);
+ else if (copyFlags != (kCopyFlags & descriptors[index]->_flags)) break;
+ }
+ if (index < withCount) return (false);
+ _flags |= copyFlags;
+
return true;
}
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
void IOMultiMemoryDescriptor::free()
{
//
super::free();
}
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
IOReturn IOMultiMemoryDescriptor::prepare(IODirection forDirection)
{
//
if ( forDirection == kIODirectionNone )
{
- forDirection = _direction;
+ forDirection = getDirection();
}
for ( index = 0; index < _descriptorsCount; index++ )
if ( status != kIOReturnSuccess )
{
- for ( unsigned indexUndo = 0; indexUndo <= index; indexUndo++ )
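+            // Undo only the descriptors already prepared; the one that failed
+            // does not need a complete().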
+ for ( unsigned indexUndo = 0; indexUndo < index; indexUndo++ )
{
- statusUndo = _descriptors[index]->complete(forDirection);
+ statusUndo = _descriptors[indexUndo]->complete(forDirection);
assert(statusUndo == kIOReturnSuccess);
}
}
return status;
}
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
IOReturn IOMultiMemoryDescriptor::complete(IODirection forDirection)
{
//
if ( forDirection == kIODirectionNone )
{
- forDirection = _direction;
+ forDirection = getDirection();
}
for ( unsigned index = 0; index < _descriptorsCount; index++ )
return statusFinal;
}
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-IOPhysicalAddress IOMultiMemoryDescriptor::getPhysicalSegment(
- IOByteCount offset,
- IOByteCount * length )
+addr64_t IOMultiMemoryDescriptor::getPhysicalSegment(IOByteCount offset,
+ IOByteCount * length,
+ IOOptionBits options)
{
//
// This method returns the physical address of the byte at the given offset
{
if ( offset < _descriptors[index]->getLength() )
{
- return _descriptors[index]->getPhysicalSegment(offset, length);
+ return _descriptors[index]->getPhysicalSegment(offset, length, options);
}
offset -= _descriptors[index]->getLength();
}
return 0;
}
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+#include "IOKitKernelInternal.h"
-IOPhysicalAddress IOMultiMemoryDescriptor::getSourceSegment(
- IOByteCount offset,
- IOByteCount * length )
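+// Map the multi-descriptor as one contiguous virtual range: unless the caller
+// passed kIOMapOverwrite (an already-reserved range), a range covering the whole
+// mapping is allocated first, and each sub-descriptor is then mapped back-to-back
+// into its slice of that range using kIOMapOverwrite.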
+IOReturn IOMultiMemoryDescriptor::doMap(vm_map_t __addressMap,
+ IOVirtualAddress * __address,
+ IOOptionBits options,
+ IOByteCount __offset,
+ IOByteCount __length)
{
- //
- // This method returns the physical address of the byte at the given offset
- // into the memory, and optionally the length of the physically contiguous
- // segment from that offset.
- //
-
- assert(offset <= _length);
-
- for ( unsigned index = 0; index < _descriptorsCount; index++ )
+ IOMemoryMap * mapping = (IOMemoryMap *) *__address;
+ vm_map_t map = mapping->fAddressMap;
+ mach_vm_size_t offset = mapping->fOffset;
+ mach_vm_size_t length = mapping->fLength;
+ mach_vm_address_t address = mapping->fAddress;
+
+ kern_return_t err;
+ IOOptionBits subOptions;
+ mach_vm_size_t mapOffset;
+ mach_vm_size_t bytesRemaining, chunk;
+ mach_vm_address_t nextAddress;
+ IOMemoryDescriptorMapAllocRef ref;
+ vm_prot_t prot;
+
+ do
{
- if ( offset < _descriptors[index]->getLength() )
+ prot = VM_PROT_READ;
+ if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
+
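+      // kIOMapOverwrite: the caller supplies an address range that is already
+      // reserved; otherwise reserve one now (pageable maps for pageable buffers).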
+ if (kIOMapOverwrite & options)
+ {
+ if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
+ {
+ map = IOPageableMapForAddress(address);
+ }
+ err = KERN_SUCCESS;
+ }
+ else
+ {
+ ref.map = map;
+ ref.tag = IOMemoryTag(map);
+ ref.options = options;
+ ref.size = length;
+ ref.prot = prot;
+ if (options & kIOMapAnywhere)
+ // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
+ ref.mapped = 0;
+ else
+ ref.mapped = mapping->fAddress;
+
+ if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
+ err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
+ else
+ err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
+
+ if (KERN_SUCCESS != err) break;
+
+ address = ref.mapped;
+ mapping->fAddress = address;
+ }
+
+ mapOffset = offset;
+ bytesRemaining = length;
+ nextAddress = address;
+ assert(mapOffset <= _length);
+ subOptions = (options & ~kIOMapAnywhere) | kIOMapOverwrite;
+
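+      // Walk the sub-descriptors: skip those wholly before the requested offset,
+      // then map each remaining chunk at the next address in the reserved range.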
+ for (unsigned index = 0; bytesRemaining && (index < _descriptorsCount); index++)
{
- return _descriptors[index]->getSourceSegment(offset, length);
+ chunk = _descriptors[index]->getLength();
+ if (mapOffset >= chunk)
+ {
+ mapOffset -= chunk;
+ continue;
+ }
+ chunk -= mapOffset;
+ if (chunk > bytesRemaining) chunk = bytesRemaining;
+ IOMemoryMap * subMap;
+ subMap = _descriptors[index]->createMappingInTask(mapping->fAddressTask, nextAddress, subOptions, mapOffset, chunk );
+ if (!subMap) break;
+      subMap->release(); // kIOMapOverwrite means releasing the submap will not deallocate the range
+
+ bytesRemaining -= chunk;
+ nextAddress += chunk;
+ mapOffset = 0;
}
- offset -= _descriptors[index]->getLength();
+ if (bytesRemaining) err = kIOReturnUnderrun;
}
+ while (false);
- if ( length ) *length = 0;
-
- return 0;
-}
-
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ if (kIOReturnSuccess == err)
+ {
+#if IOTRACKING
+ IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
+#endif
+ }
-void * IOMultiMemoryDescriptor::getVirtualSegment( IOByteCount /* offset */ ,
- IOByteCount * /* length */ )
-{
- return 0;
+ return (err);
}
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-IOByteCount IOMultiMemoryDescriptor::readBytes( IOByteCount offset,
- void * bytes,
- IOByteCount withLength )
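+// Apply the purgeable state change to every sub-descriptor and report a combined
+// old state: Empty wins over Volatile, which wins over NonVolatile.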
+IOReturn IOMultiMemoryDescriptor::setPurgeable( IOOptionBits newState,
+ IOOptionBits * oldState )
{
- //
- // Copies data from the memory descriptor's buffer at the given offset, to
- // the specified buffer. Returns the number of bytes copied.
- //
+ IOReturn err;
+ IOOptionBits totalState, state;
- IOByteCount bytesCopied = 0;
- unsigned index;
-
- for ( index = 0; index < _descriptorsCount; index++ )
+ totalState = kIOMemoryPurgeableNonVolatile;
+ err = kIOReturnSuccess;
+ for (unsigned index = 0; index < _descriptorsCount; index++)
{
- if ( offset < _descriptors[index]->getLength() ) break;
- offset -= _descriptors[index]->getLength();
+ err = _descriptors[index]->setPurgeable(newState, &state);
+ if (kIOReturnSuccess != err) break;
+
+ if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty;
+ else if (kIOMemoryPurgeableEmpty == totalState) continue;
+ else if (kIOMemoryPurgeableVolatile == totalState) continue;
+ else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile;
+ else totalState = kIOMemoryPurgeableNonVolatile;
}
+ if (oldState) *oldState = totalState;
- for ( ; index < _descriptorsCount && withLength; index++)
- {
- IOByteCount copy = min(_descriptors[index]->getLength(), withLength);
- IOByteCount copied = _descriptors[index]->readBytes(offset,bytes,copy);
+ return (err);
+}
- bytesCopied += copied;
- if ( copied != copy ) break;
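+// Sum resident and dirty page counts across all sub-descriptors.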
+IOReturn IOMultiMemoryDescriptor::getPageCounts(IOByteCount * pResidentPageCount,
+ IOByteCount * pDirtyPageCount)
+{
+ IOReturn err;
+ IOByteCount totalResidentPageCount, totalDirtyPageCount;
+ IOByteCount residentPageCount, dirtyPageCount;
- bytes = ((UInt8 *) bytes) + copied;
- withLength -= copied;
- offset = 0;
+ err = kIOReturnSuccess;
+ totalResidentPageCount = totalDirtyPageCount = 0;
+ for (unsigned index = 0; index < _descriptorsCount; index++)
+ {
+ err = _descriptors[index]->getPageCounts(&residentPageCount, &dirtyPageCount);
+ if (kIOReturnSuccess != err) break;
+ totalResidentPageCount += residentPageCount;
+ totalDirtyPageCount += dirtyPageCount;
}
- return bytesCopied;
-}
+ if (pResidentPageCount) *pResidentPageCount = totalResidentPageCount;
+ if (pDirtyPageCount) *pDirtyPageCount = totalDirtyPageCount;
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ return (err);
+}
-IOByteCount IOMultiMemoryDescriptor::writeBytes( IOByteCount offset,
- const void * bytes,
- IOByteCount withLength )
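+// The multi-descriptor only reports a preparation ID of its own once every
+// sub-descriptor supports preparation IDs and has been prepared.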
+uint64_t IOMultiMemoryDescriptor::getPreparationID( void )
{
- //
- // Copies data to the memory descriptor's buffer at the given offset, from
- // the specified buffer. Returns the number of bytes copied.
- //
-
- IOByteCount bytesCopied = 0;
- unsigned index;
- for ( index = 0; index < _descriptorsCount; index++ )
+ if (!super::getKernelReserved())
{
- if ( offset < _descriptors[index]->getLength() ) break;
- offset -= _descriptors[index]->getLength();
+ return (kIOPreparationIDUnsupported);
}
- for ( ; index < _descriptorsCount && withLength; index++)
+ for (unsigned index = 0; index < _descriptorsCount; index++)
{
- IOByteCount copy = min(_descriptors[index]->getLength(), withLength);
- IOByteCount copied = _descriptors[index]->writeBytes(offset,bytes,copy);
+ uint64_t preparationID = _descriptors[index]->getPreparationID();
- bytesCopied += copied;
- if ( copied != copy ) break;
+ if ( preparationID == kIOPreparationIDUnsupported )
+ {
+ return (kIOPreparationIDUnsupported);
+ }
- bytes = ((UInt8 *) bytes) + copied;
- withLength -= copied;
- offset = 0;
+ if ( preparationID == kIOPreparationIDUnprepared )
+ {
+ return (kIOPreparationIDUnprepared);
+ }
}
- return bytesCopied;
+ super::setPreparationID();
+
+ return (super::getPreparationID());
}