#include <IOKit/IOKitDebug.h>
#include <libkern/c++/OSContainers.h>
+#include <libkern/c++/OSDictionary.h>
+#include <libkern/c++/OSArray.h>
+#include <libkern/c++/OSSymbol.h>
+#include <libkern/c++/OSNumber.h>
#include <sys/cdefs.h>
__BEGIN_DECLS
#include <vm/pmap.h>
+#include <device/device_port.h>
+void bcopy_phys(char *from, char *to, int size);
void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
- vm_prot_t prot, boolean_t wired);
+ vm_prot_t prot, unsigned int flags, boolean_t wired);
+#ifndef i386
+struct phys_entry *pmap_find_physentry(vm_offset_t pa);
+#endif
void ipc_port_release_send(ipc_port_t port);
vm_offset_t vm_map_get_phys_page(vm_map_t map, vm_offset_t offset);
+
+memory_object_t
+device_pager_setup(
+ memory_object_t pager,
+ int device_handle,
+ vm_size_t size,
+ int flags);
+void
+device_pager_deallocate(
+ memory_object_t);
+kern_return_t
+device_pager_populate_object(
+ memory_object_t pager,
+ vm_object_offset_t offset,
+ vm_offset_t phys_addr,
+ vm_size_t size);
+
+/*
+ * Page fault handling based on vm_map (or entries therein)
+ */
+extern kern_return_t vm_fault(
+ vm_map_t map,
+ vm_offset_t vaddr,
+ vm_prot_t fault_type,
+ boolean_t change_wiring,
+ int interruptible,
+ pmap_t caller_pmap,
+ vm_offset_t caller_pmap_addr);
+
__END_DECLS
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-OSDefineMetaClass( IOMemoryDescriptor, OSObject )
-OSDefineAbstractStructors( IOMemoryDescriptor, OSObject )
+OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
#define super IOMemoryDescriptor
OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
-extern "C" vm_map_t IOPageableMapForAddress( vm_address_t address );
+extern "C" {
+
+vm_map_t IOPageableMapForAddress( vm_address_t address );
+
+typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);
+
+kern_return_t IOIteratePageableMaps(vm_size_t size,
+ IOIteratePageableMapsCallback callback, void * ref);
+
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+static IORecursiveLock * gIOMemoryLock;
+
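+// Convenience wrappers around the single global descriptor lock; SLEEP and WAKEUP
+// park and resume threads while a descriptor is redirected (see handleFault/redirect).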
+#define LOCK IORecursiveLockLock( gIOMemoryLock)
+#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
+#define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
+#define WAKEUP \
+ IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
return( get_task_map( task ));
}
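+// Translate a task virtual address to a physical address, falling back to the
+// task's vm_map when the pmap has no translation for the page.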
+inline vm_offset_t pmap_extract_safe(task_t task, vm_offset_t va)
+{
+ vm_offset_t pa = pmap_extract(get_task_pmap(task), va);
+
+ if ( pa == 0 )
+ {
+ pa = vm_map_get_phys_page(get_task_map(task), trunc_page(va));
+ if ( pa ) pa += va - trunc_page(va);
+ }
+
+ return pa;
+}
+
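+// bcopy_phys with interrupts disabled for the duration of the copy.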
+inline void bcopy_phys_safe(char * from, char * to, int size)
+{
+ boolean_t enabled = ml_set_interrupts_enabled(FALSE);
+
+ bcopy_phys(from, to, size);
+
+ ml_set_interrupts_enabled(enabled);
+}
+
+#define next_page(a) ( trunc_page(a) + page_size )
+
+
+extern "C" {
+
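+// Callbacks made by the device pager set up in IOGeneralMemoryDescriptor::doMap().
+// The device_handle is the descriptor's ExpansionData (its layout is mirrored locally
+// here): device_data_action forwards a fault to the owning descriptor's handleFault(),
+// and device_close frees the ExpansionData when the pager is torn down.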
+kern_return_t device_data_action(
+ int device_handle,
+ ipc_port_t device_pager,
+ vm_prot_t protection,
+ vm_object_offset_t offset,
+ vm_size_t size)
+{
+ struct ExpansionData {
+ void * devicePager;
+ unsigned int pagerContig:1;
+ unsigned int unused:31;
+ IOMemoryDescriptor * memory;
+ };
+ kern_return_t kr;
+ ExpansionData * ref = (ExpansionData *) device_handle;
+ IOMemoryDescriptor * memDesc;
+
+ LOCK;
+ memDesc = ref->memory;
+ if( memDesc)
+ kr = memDesc->handleFault( device_pager, 0, 0,
+ offset, size, kIOMapDefaultCache /*?*/);
+ else
+ kr = KERN_ABORTED;
+ UNLOCK;
+
+ return( kr );
+}
+
+kern_return_t device_close(
+ int device_handle)
+{
+ struct ExpansionData {
+ void * devicePager;
+ unsigned int pagerContig:1;
+ unsigned int unused:31;
+ IOMemoryDescriptor * memory;
+ };
+ ExpansionData * ref = (ExpansionData *) device_handle;
+
+ IODelete( ref, ExpansionData, 1 );
+
+ return( kIOReturnSuccess );
+}
+
+}
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
*/
void IOGeneralMemoryDescriptor::free()
{
+ LOCK;
+ if( reserved)
+ reserved->memory = 0;
+ UNLOCK;
+
while (_wireCount)
complete();
if (_kernPtrAligned)
unmapFromKernel();
if (_ranges.v && _rangesIsAllocated)
IODelete(_ranges.v, IOVirtualRange, _rangesCount);
+
+ if( reserved && reserved->devicePager)
+ device_pager_deallocate( reserved->devicePager );
+
+ // memEntry holds a ref on the device pager which owns reserved (ExpansionData)
+ // so no reserved access after this point
if( _memEntry)
ipc_port_release_send( (ipc_port_t) _memEntry );
super::free();
}
-void IOGeneralMemoryDescriptor::unmapFromKernel()
-{
- kern_return_t krtn;
- vm_offset_t off;
- // Pull the shared pages out of the task map
- // Do we need to unwire it first?
- for ( off = 0; off < _kernSize; off += page_size )
- {
- pmap_change_wiring(
- kernel_pmap,
- _kernPtrAligned + off,
- FALSE);
-
- pmap_remove(
- kernel_pmap,
- _kernPtrAligned + off,
- _kernPtrAligned + off + page_size);
- }
- // Free the former shmem area in the task
- krtn = vm_deallocate(kernel_map,
- _kernPtrAligned,
- _kernSize );
- assert(krtn == KERN_SUCCESS);
- _kernPtrAligned = 0;
-}
-
-void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
-{
- kern_return_t krtn;
- vm_offset_t off;
-
- if (_kernPtrAligned)
- {
- if (_kernPtrAtIndex == rangeIndex) return;
- unmapFromKernel();
- assert(_kernPtrAligned == 0);
- }
-
- vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
-
- _kernSize = trunc_page(_ranges.v[rangeIndex].address +
- _ranges.v[rangeIndex].length +
- page_size - 1) - srcAlign;
-
- /* Find some memory of the same size in kernel task. We use vm_allocate()
- to do this. vm_allocate inserts the found memory object in the
- target task's map as a side effect. */
- krtn = vm_allocate( kernel_map,
- &_kernPtrAligned,
- _kernSize,
- VM_FLAGS_ANYWHERE|VM_MAKE_TAG(VM_MEMORY_IOKIT) ); // Find first fit
- assert(krtn == KERN_SUCCESS);
- if(krtn) return;
-
- /* For each page in the area allocated from the kernel map,
- find the physical address of the page.
- Enter the page in the target task's pmap, at the
- appropriate target task virtual address. */
- for ( off = 0; off < _kernSize; off += page_size )
- {
- vm_offset_t kern_phys_addr, phys_addr;
- if( _task)
- phys_addr = pmap_extract( get_task_pmap(_task), srcAlign + off );
- else
- phys_addr = srcAlign + off;
- assert(phys_addr);
- if(phys_addr == 0) return;
-
- // Check original state.
- kern_phys_addr = pmap_extract( kernel_pmap, _kernPtrAligned + off );
- // Set virtual page to point to the right physical one
- pmap_enter(
- kernel_pmap,
- _kernPtrAligned + off,
- phys_addr,
- VM_PROT_READ|VM_PROT_WRITE,
- TRUE);
- }
- _kernPtrAtIndex = rangeIndex;
-}
+/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
+/* DEPRECATED */ {
+/* DEPRECATED */ kern_return_t krtn;
+/* DEPRECATED */ vm_offset_t off;
+/* DEPRECATED */ // Pull the shared pages out of the task map
+/* DEPRECATED */ // Do we need to unwire it first?
+/* DEPRECATED */ for ( off = 0; off < _kernSize; off += page_size )
+/* DEPRECATED */ {
+/* DEPRECATED */ pmap_change_wiring(
+/* DEPRECATED */ kernel_pmap,
+/* DEPRECATED */ _kernPtrAligned + off,
+/* DEPRECATED */ FALSE);
+/* DEPRECATED */
+/* DEPRECATED */ pmap_remove(
+/* DEPRECATED */ kernel_pmap,
+/* DEPRECATED */ _kernPtrAligned + off,
+/* DEPRECATED */ _kernPtrAligned + off + page_size);
+/* DEPRECATED */ }
+/* DEPRECATED */ // Free the former shmem area in the task
+/* DEPRECATED */ krtn = vm_deallocate(kernel_map,
+/* DEPRECATED */ _kernPtrAligned,
+/* DEPRECATED */ _kernSize );
+/* DEPRECATED */ assert(krtn == KERN_SUCCESS);
+/* DEPRECATED */ _kernPtrAligned = 0;
+/* DEPRECATED */ }
+/* DEPRECATED */
+/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
+/* DEPRECATED */ {
+/* DEPRECATED */ kern_return_t krtn;
+/* DEPRECATED */ vm_offset_t off;
+/* DEPRECATED */
+/* DEPRECATED */ if (_kernPtrAligned)
+/* DEPRECATED */ {
+/* DEPRECATED */ if (_kernPtrAtIndex == rangeIndex) return;
+/* DEPRECATED */ unmapFromKernel();
+/* DEPRECATED */ assert(_kernPtrAligned == 0);
+/* DEPRECATED */ }
+/* DEPRECATED */
+/* DEPRECATED */ vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
+/* DEPRECATED */
+/* DEPRECATED */ _kernSize = trunc_page(_ranges.v[rangeIndex].address +
+/* DEPRECATED */ _ranges.v[rangeIndex].length +
+/* DEPRECATED */ page_size - 1) - srcAlign;
+/* DEPRECATED */
+/* DEPRECATED */ /* Find some memory of the same size in kernel task. We use vm_allocate() */
+/* DEPRECATED */ /* to do this. vm_allocate inserts the found memory object in the */
+/* DEPRECATED */ /* target task's map as a side effect. */
+/* DEPRECATED */ krtn = vm_allocate( kernel_map,
+/* DEPRECATED */ &_kernPtrAligned,
+/* DEPRECATED */ _kernSize,
+/* DEPRECATED */ VM_FLAGS_ANYWHERE|VM_MAKE_TAG(VM_MEMORY_IOKIT) ); // Find first fit
+/* DEPRECATED */ assert(krtn == KERN_SUCCESS);
+/* DEPRECATED */ if(krtn) return;
+/* DEPRECATED */
+/* DEPRECATED */ /* For each page in the area allocated from the kernel map, */
+/* DEPRECATED */ /* find the physical address of the page. */
+/* DEPRECATED */ /* Enter the page in the target task's pmap, at the */
+/* DEPRECATED */ /* appropriate target task virtual address. */
+/* DEPRECATED */ for ( off = 0; off < _kernSize; off += page_size )
+/* DEPRECATED */ {
+/* DEPRECATED */ vm_offset_t kern_phys_addr, phys_addr;
+/* DEPRECATED */ if( _task)
+/* DEPRECATED */ phys_addr = pmap_extract( get_task_pmap(_task), srcAlign + off );
+/* DEPRECATED */ else
+/* DEPRECATED */ phys_addr = srcAlign + off;
+/* DEPRECATED */ assert(phys_addr);
+/* DEPRECATED */ if(phys_addr == 0) return;
+/* DEPRECATED */
+/* DEPRECATED */ // Check original state.
+/* DEPRECATED */ kern_phys_addr = pmap_extract( kernel_pmap, _kernPtrAligned + off );
+/* DEPRECATED */ // Set virtual page to point to the right physical one
+/* DEPRECATED */ pmap_enter(
+/* DEPRECATED */ kernel_pmap,
+/* DEPRECATED */ _kernPtrAligned + off,
+/* DEPRECATED */ phys_addr,
+/* DEPRECATED */ VM_PROT_READ|VM_PROT_WRITE,
+/* DEPRECATED */ VM_WIMG_USE_DEFAULT,
+/* DEPRECATED */ TRUE);
+/* DEPRECATED */ }
+/* DEPRECATED */ _kernPtrAtIndex = rangeIndex;
+/* DEPRECATED */ }
/*
* getDirection:
return( _tag);
}
-/*
- * setPosition
- *
- * Set the logical start position inside the client buffer.
- *
- * It is convention that the position reflect the actual byte count that
- * is successfully transferred into or out of the buffer, before the I/O
- * request is "completed" (ie. sent back to its originator).
- */
-
-void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
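+// Generic implementation: prepare() so the memory can be resolved, look up the
+// physical segment at the offset, then complete() again.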
+IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset,
+ IOByteCount * length )
{
- assert(position <= _length);
+ IOPhysicalAddress physAddr = 0;
- if (position >= _length)
- {
- _position = _length;
- _positionAtIndex = _rangesCount; /* careful: out-of-bounds */
- _positionAtOffset = 0;
- return;
- }
-
- if (position < _position)
- {
- _positionAtOffset = position;
- _positionAtIndex = 0;
- }
- else
- {
- _positionAtOffset += (position - _position);
+ if( prepare() == kIOReturnSuccess) {
+ physAddr = getPhysicalSegment( offset, length );
+ complete();
}
- _position = position;
- while (_positionAtOffset >= _ranges.v[_positionAtIndex].length)
- {
- _positionAtOffset -= _ranges.v[_positionAtIndex].length;
- _positionAtIndex++;
- }
+ return( physAddr );
}
-/*
- * readBytes:
- *
- * Copy data from the memory descriptor's buffer into the specified buffer,
- * relative to the current position. The memory descriptor's position is
- * advanced based on the number of bytes copied.
- */
-
-IOByteCount IOGeneralMemoryDescriptor::readBytes(IOByteCount offset,
- void * bytes, IOByteCount withLength)
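+// Copy out of the descriptor using physical addresses: walk the source segments
+// returned by getPhysicalSegment() and the destination's physical pages, splitting
+// each bcopy_phys_safe() at page boundaries on either side.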
+IOByteCount IOMemoryDescriptor::readBytes( IOByteCount offset,
+ void * bytes,
+ IOByteCount withLength )
{
- IOByteCount bytesLeft;
- void * segment;
- IOByteCount segmentLength;
+ IOByteCount bytesCopied = 0;
- if( offset != _position)
- setPosition( offset );
-
- withLength = min(withLength, _length - _position);
- bytesLeft = withLength;
+ assert(offset <= _length);
+ assert(offset <= _length - withLength);
-#if 0
- while (bytesLeft && (_position < _length))
+ if ( offset < _length )
{
- /* Compute the relative length to the end of this virtual segment. */
- segmentLength = min(_ranges.v[_positionAtIndex].length - _positionAtOffset, bytesLeft);
-
- /* Compute the relative address of this virtual segment. */
- segment = (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
-
- if (KERN_SUCCESS != vm_map_read_user(getMapForTask(_task, segment),
- /* from */ (vm_offset_t) segment, /* to */ (vm_offset_t) bytes,
- /* size */ segmentLength))
- {
- assert( false );
- bytesLeft = withLength;
- break;
- }
- bytesLeft -= segmentLength;
- offset += segmentLength;
- setPosition(offset);
- }
-#else
- while (bytesLeft && (segment = getVirtualSegment(offset, &segmentLength)))
- {
- segmentLength = min(segmentLength, bytesLeft);
- bcopy(/* from */ segment, /* to */ bytes, /* size */ segmentLength);
- bytesLeft -= segmentLength;
- offset += segmentLength;
- bytes = (void *) (((UInt32) bytes) + segmentLength);
- }
-#endif
+ withLength = min(withLength, _length - offset);
- return withLength - bytesLeft;
-}
+ while ( withLength ) // (process another source segment?)
+ {
+ IOPhysicalAddress sourceSegment;
+ IOByteCount sourceSegmentLength;
-/*
- * writeBytes:
- *
- * Copy data to the memory descriptor's buffer from the specified buffer,
- * relative to the current position. The memory descriptor's position is
- * advanced based on the number of bytes copied.
- */
-IOByteCount IOGeneralMemoryDescriptor::writeBytes(IOByteCount offset,
- const void* bytes,IOByteCount withLength)
-{
- IOByteCount bytesLeft;
- void * segment;
- IOByteCount segmentLength;
+ sourceSegment = getPhysicalSegment(offset, &sourceSegmentLength);
+ if ( sourceSegment == 0 ) goto readBytesErr;
- if( offset != _position)
- setPosition( offset );
+ sourceSegmentLength = min(sourceSegmentLength, withLength);
- withLength = min(withLength, _length - _position);
- bytesLeft = withLength;
+ while ( sourceSegmentLength ) // (process another target segment?)
+ {
+ IOPhysicalAddress targetSegment;
+ IOByteCount targetSegmentLength;
-#if 0
- while (bytesLeft && (_position < _length))
- {
- assert(_position <= _length);
+ targetSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes);
+ if ( targetSegment == 0 ) goto readBytesErr;
- /* Compute the relative length to the end of this virtual segment. */
- segmentLength = min(_ranges.v[_positionAtIndex].length - _positionAtOffset, bytesLeft);
+ targetSegmentLength = min(next_page(targetSegment) - targetSegment, sourceSegmentLength);
- /* Compute the relative address of this virtual segment. */
- segment = (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
+ if ( sourceSegment + targetSegmentLength > next_page(sourceSegment) )
+ {
+ IOByteCount pageLength;
+
+ pageLength = next_page(sourceSegment) - sourceSegment;
+
+ bcopy_phys_safe( /* from */ (char *) sourceSegment,
+ /* to */ (char *) targetSegment,
+ /* size */ (int ) pageLength );
+
+                bytes             = ((UInt8 *) bytes) + pageLength;
+ bytesCopied += pageLength;
+ offset += pageLength;
+ sourceSegment += pageLength;
+ sourceSegmentLength -= pageLength;
+ targetSegment += pageLength;
+ targetSegmentLength -= pageLength;
+ withLength -= pageLength;
+ }
- if (KERN_SUCCESS != vm_map_write_user(getMapForTask(_task, segment),
- /* from */ (vm_offset_t) bytes,
- /* to */ (vm_offset_t) segment,
- /* size */ segmentLength))
- {
- assert( false );
- bytesLeft = withLength;
- break;
- }
- bytesLeft -= segmentLength;
- offset += segmentLength;
- setPosition(offset);
+ bcopy_phys_safe( /* from */ (char *) sourceSegment,
+ /* to */ (char *) targetSegment,
+ /* size */ (int ) targetSegmentLength );
+
+            bytes             = ((UInt8 *) bytes) + targetSegmentLength;
+ bytesCopied += targetSegmentLength;
+ offset += targetSegmentLength;
+ sourceSegment += targetSegmentLength;
+ sourceSegmentLength -= targetSegmentLength;
+ withLength -= targetSegmentLength;
+ }
+ }
}
-#else
- while (bytesLeft && (segment = getVirtualSegment(offset, &segmentLength)))
+
+readBytesErr:
+
+ if ( bytesCopied )
{
- segmentLength = min(segmentLength, bytesLeft);
- bcopy(/* from */ bytes, /* to */ segment, /* size */ segmentLength);
- // Flush cache in case we're copying code around, eg. handling a code page fault
- IOFlushProcessorCache(kernel_task, (vm_offset_t) segment, segmentLength );
-
- bytesLeft -= segmentLength;
- offset += segmentLength;
- bytes = (void *) (((UInt32) bytes) + segmentLength);
+ // We mark the destination pages as modified, just
+ // in case they are made pageable later on in life.
+
+ pmap_modify_pages( /* pmap */ kernel_pmap,
+ /* start */ trunc_page(((vm_offset_t) bytes) - bytesCopied),
+ /* end */ round_page(((vm_offset_t) bytes)) );
}
-#endif
- return withLength - bytesLeft;
+ return bytesCopied;
}
-/*
- * getPhysicalSegment:
- *
- * Get the physical address of the buffer, relative to the current position.
- * If the current position is at the end of the buffer, a zero is returned.
- */
-IOPhysicalAddress
-IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset,
- IOByteCount * lengthOfSegment)
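+// Counterpart to readBytes: copy from a kernel buffer into the descriptor's physical
+// pages, flushing the data cache in case the copied bytes are executable code.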
+IOByteCount IOMemoryDescriptor::writeBytes( IOByteCount offset,
+ const void * bytes,
+ IOByteCount withLength )
{
- vm_address_t virtualAddress;
- IOByteCount virtualLength;
- pmap_t virtualPMap;
- IOPhysicalAddress physicalAddress;
- IOPhysicalLength physicalLength;
+ IOByteCount bytesCopied = 0;
- if( kIOMemoryRequiresWire & _flags)
- assert( _wireCount );
+ assert(offset <= _length);
+ assert(offset <= _length - withLength);
- if ((0 == _task) && (1 == _rangesCount))
+ if ( offset < _length )
{
- assert(offset <= _length);
- if (offset >= _length)
- {
- physicalAddress = 0;
- physicalLength = 0;
- }
- else
- {
- physicalLength = _length - offset;
- physicalAddress = offset + _ranges.v[0].address;
- }
+ withLength = min(withLength, _length - offset);
- if (lengthOfSegment)
- *lengthOfSegment = physicalLength;
- return physicalAddress;
- }
+ while ( withLength ) // (process another target segment?)
+ {
+ IOPhysicalAddress targetSegment;
+ IOByteCount targetSegmentLength;
- if( offset != _position)
- setPosition( offset );
+ targetSegment = getPhysicalSegment(offset, &targetSegmentLength);
+ if ( targetSegment == 0 ) goto writeBytesErr;
- assert(_position <= _length);
+ targetSegmentLength = min(targetSegmentLength, withLength);
- /* Fail gracefully if the position is at (or past) the end-of-buffer. */
- if (_position >= _length)
- {
- *lengthOfSegment = 0;
- return 0;
- }
+ while ( targetSegmentLength ) // (process another source segment?)
+ {
+ IOPhysicalAddress sourceSegment;
+ IOByteCount sourceSegmentLength;
- /* Prepare to compute the largest contiguous physical length possible. */
+ sourceSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes);
+ if ( sourceSegment == 0 ) goto writeBytesErr;
- virtualAddress = _ranges.v[_positionAtIndex].address + _positionAtOffset;
- virtualLength = _ranges.v[_positionAtIndex].length - _positionAtOffset;
- vm_address_t virtualPage = trunc_page(virtualAddress);
- if( _task)
- virtualPMap = get_task_pmap(_task);
- else
- virtualPMap = 0;
+ sourceSegmentLength = min(next_page(sourceSegment) - sourceSegment, targetSegmentLength);
- physicalAddress = (virtualAddress == _cachedVirtualAddress) ?
- _cachedPhysicalAddress : /* optimization */
- virtualPMap ?
- pmap_extract(virtualPMap, virtualAddress) :
- virtualAddress;
- physicalLength = trunc_page(physicalAddress) + page_size - physicalAddress;
+ if ( targetSegment + sourceSegmentLength > next_page(targetSegment) )
+ {
+ IOByteCount pageLength;
- if (!physicalAddress && _task)
- {
- physicalAddress =
- vm_map_get_phys_page(get_task_map(_task), virtualPage);
- physicalAddress += virtualAddress - virtualPage;
- }
+ pageLength = next_page(targetSegment) - targetSegment;
- if (physicalAddress == 0) /* memory must be wired in order to proceed */
- {
- assert(physicalAddress);
- *lengthOfSegment = 0;
- return 0;
+ bcopy_phys_safe( /* from */ (char *) sourceSegment,
+ /* to */ (char *) targetSegment,
+ /* size */ (int ) pageLength );
+
+ // We flush the data cache in case it is code we've copied,
+ // such that the instruction cache is in the know about it.
+
+ flush_dcache(targetSegment, pageLength, true);
+
+                bytes               = ((UInt8 *) bytes) + pageLength;
+ bytesCopied += pageLength;
+ offset += pageLength;
+ sourceSegment += pageLength;
+ sourceSegmentLength -= pageLength;
+ targetSegment += pageLength;
+ targetSegmentLength -= pageLength;
+ withLength -= pageLength;
+ }
+
+ bcopy_phys_safe( /* from */ (char *) sourceSegment,
+ /* to */ (char *) targetSegment,
+ /* size */ (int ) sourceSegmentLength );
+
+ // We flush the data cache in case it is code we've copied,
+ // such that the instruction cache is in the know about it.
+
+ flush_dcache(targetSegment, sourceSegmentLength, true);
+
+            bytes               = ((UInt8 *) bytes) + sourceSegmentLength;
+ bytesCopied += sourceSegmentLength;
+ offset += sourceSegmentLength;
+ targetSegment += sourceSegmentLength;
+ targetSegmentLength -= sourceSegmentLength;
+ withLength -= sourceSegmentLength;
+ }
+ }
}
- /* Compute the largest contiguous physical length possible, within range. */
- IOPhysicalAddress physicalPage = trunc_page(physicalAddress);
+writeBytesErr:
- while (physicalLength < virtualLength)
+ return bytesCopied;
+}
+
+extern "C" {
+// osfmk/device/iokit_rpc.c
+extern unsigned int IOTranslateCacheBits(struct phys_entry *pp);
+};
+
+/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
+/* DEPRECATED */ {
+/* DEPRECATED */ assert(position <= _length);
+/* DEPRECATED */
+/* DEPRECATED */ if (position >= _length)
+/* DEPRECATED */ {
+/* DEPRECATED */ _position = _length;
+/* DEPRECATED */ _positionAtIndex = _rangesCount; /* careful: out-of-bounds */
+/* DEPRECATED */ _positionAtOffset = 0;
+/* DEPRECATED */ return;
+/* DEPRECATED */ }
+/* DEPRECATED */
+/* DEPRECATED */ if (position < _position)
+/* DEPRECATED */ {
+/* DEPRECATED */ _positionAtOffset = position;
+/* DEPRECATED */ _positionAtIndex = 0;
+/* DEPRECATED */ }
+/* DEPRECATED */ else
+/* DEPRECATED */ {
+/* DEPRECATED */ _positionAtOffset += (position - _position);
+/* DEPRECATED */ }
+/* DEPRECATED */ _position = position;
+/* DEPRECATED */
+/* DEPRECATED */ while (_positionAtOffset >= _ranges.v[_positionAtIndex].length)
+/* DEPRECATED */ {
+/* DEPRECATED */ _positionAtOffset -= _ranges.v[_positionAtIndex].length;
+/* DEPRECATED */ _positionAtIndex++;
+/* DEPRECATED */ }
+/* DEPRECATED */ }
+
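+// Return the physical address for an offset into the descriptor, plus the length of
+// the physically contiguous run that follows it (never crossing a page boundary for
+// virtual ranges, coalescing adjacent ranges for physical ones).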
+IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
+ IOByteCount * lengthOfSegment )
+{
+ IOPhysicalAddress address = 0;
+ IOPhysicalLength length = 0;
+
+
+// assert(offset <= _length);
+
+ if ( offset < _length ) // (within bounds?)
{
- physicalPage += page_size;
- virtualPage += page_size;
- _cachedVirtualAddress = virtualPage;
- _cachedPhysicalAddress = virtualPMap ?
- pmap_extract(virtualPMap, virtualPage) :
- virtualPage;
- if (!_cachedPhysicalAddress && _task)
- {
- _cachedPhysicalAddress =
- vm_map_get_phys_page(get_task_map(_task), virtualPage);
- }
+ unsigned rangesIndex = 0;
- if (_cachedPhysicalAddress != physicalPage) break;
+ for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
+ {
+ offset -= _ranges.v[rangesIndex].length; // (make offset relative)
+ }
- physicalLength += page_size;
- }
+ if ( _task == 0 ) // (physical memory?)
+ {
+ address = _ranges.v[rangesIndex].address + offset;
+ length = _ranges.v[rangesIndex].length - offset;
- /* Clip contiguous physical length at the end of this range. */
- if (physicalLength > virtualLength)
- physicalLength = virtualLength;
+ for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
+ {
+ if ( address + length != _ranges.v[rangesIndex].address ) break;
- if( lengthOfSegment)
- *lengthOfSegment = physicalLength;
+ length += _ranges.v[rangesIndex].length; // (coalesce ranges)
+ }
+ }
+ else // (virtual memory?)
+ {
+ vm_address_t addressVirtual = _ranges.v[rangesIndex].address + offset;
- return physicalAddress;
-}
+ assert((0 == (kIOMemoryRequiresWire & _flags)) || _wireCount);
+ address = pmap_extract_safe(_task, addressVirtual);
+ length = next_page(addressVirtual) - addressVirtual;
+ length = min(_ranges.v[rangesIndex].length - offset, length);
+ }
-/*
- * getVirtualSegment:
- *
- * Get the virtual address of the buffer, relative to the current position.
- * If the memory wasn't mapped into the caller's address space, it will be
- * mapped in now. If the current position is at the end of the buffer, a
- * null is returned.
- */
-void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
- IOByteCount * lengthOfSegment)
+ assert(address);
+ if ( address == 0 ) length = 0;
+ }
+
+ if ( lengthOfSegment ) *lengthOfSegment = length;
+
+ return address;
+}
+
+IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment( IOByteCount offset,
+ IOByteCount * lengthOfSegment )
{
- if( offset != _position)
- setPosition( offset );
+ IOPhysicalAddress address = 0;
+ IOPhysicalLength length = 0;
- assert(_position <= _length);
+ assert(offset <= _length);
- /* Fail gracefully if the position is at (or past) the end-of-buffer. */
- if (_position >= _length)
+ if ( offset < _length ) // (within bounds?)
{
- *lengthOfSegment = 0;
- return 0;
- }
+ unsigned rangesIndex = 0;
- /* Compute the relative length to the end of this virtual segment. */
- *lengthOfSegment = _ranges.v[_positionAtIndex].length - _positionAtOffset;
+ for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
+ {
+ offset -= _ranges.v[rangesIndex].length; // (make offset relative)
+ }
- /* Compute the relative address of this virtual segment. */
- if (_task == kernel_task)
- return (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
- else
- {
- vm_offset_t off;
+ address = _ranges.v[rangesIndex].address + offset;
+ length = _ranges.v[rangesIndex].length - offset;
- mapIntoKernel(_positionAtIndex);
+ for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
+ {
+ if ( address + length != _ranges.v[rangesIndex].address ) break;
- off = _ranges.v[_kernPtrAtIndex].address;
- off -= trunc_page(off);
+ length += _ranges.v[rangesIndex].length; // (coalesce ranges)
+ }
- return (void *) (_kernPtrAligned + off + _positionAtOffset);
+ assert(address);
+ if ( address == 0 ) length = 0;
}
-}
+
+ if ( lengthOfSegment ) *lengthOfSegment = length;
+
+ return address;
+}
+
+/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
+/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
+/* DEPRECATED */ IOByteCount * lengthOfSegment)
+/* DEPRECATED */ {
+/* DEPRECATED */ if( offset != _position)
+/* DEPRECATED */ setPosition( offset );
+/* DEPRECATED */
+/* DEPRECATED */ assert(_position <= _length);
+/* DEPRECATED */
+/* DEPRECATED */ /* Fail gracefully if the position is at (or past) the end-of-buffer. */
+/* DEPRECATED */ if (_position >= _length)
+/* DEPRECATED */ {
+/* DEPRECATED */ *lengthOfSegment = 0;
+/* DEPRECATED */ return 0;
+/* DEPRECATED */ }
+/* DEPRECATED */
+/* DEPRECATED */ /* Compute the relative length to the end of this virtual segment. */
+/* DEPRECATED */ *lengthOfSegment = _ranges.v[_positionAtIndex].length - _positionAtOffset;
+/* DEPRECATED */
+/* DEPRECATED */ /* Compute the relative address of this virtual segment. */
+/* DEPRECATED */ if (_task == kernel_task)
+/* DEPRECATED */ return (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
+/* DEPRECATED */ else
+/* DEPRECATED */ {
+/* DEPRECATED */ vm_offset_t off;
+/* DEPRECATED */
+/* DEPRECATED */ mapIntoKernel(_positionAtIndex);
+/* DEPRECATED */
+/* DEPRECATED */ off = _ranges.v[_kernPtrAtIndex].address;
+/* DEPRECATED */ off -= trunc_page(off);
+/* DEPRECATED */
+/* DEPRECATED */ return (void *) (_kernPtrAligned + off + _positionAtOffset);
+/* DEPRECATED */ }
+/* DEPRECATED */ }
+/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/*
* prepare
if(forDirection == kIODirectionNone)
forDirection = _direction;
- vm_prot_t access = VM_PROT_DEFAULT; // Could be cleverer using direction
+ vm_prot_t access;
+
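+    // kIODirectionIn means the device will write into this memory, so it must be
+    // wired writable; kIODirectionOut only needs read access.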
+ switch (forDirection)
+ {
+ case kIODirectionIn:
+ access = VM_PROT_WRITE;
+ break;
+
+ case kIODirectionOut:
+ access = VM_PROT_READ;
+ break;
+
+ default:
+ access = VM_PROT_READ | VM_PROT_WRITE;
+ break;
+ }
//
// Check user read/write access to the data buffer.
vm_map_t taskVMMap = getMapForTask(_task, srcAlign);
- rc = vm_map_wire(taskVMMap, srcAlign, srcAlignEnd, access, FALSE);
- if (KERN_SUCCESS != rc) {
- IOLog("IOMemoryDescriptor::prepare vm_map_wire failed: %d\n", rc);
- goto abortExit;
- }
-
// If this I/O is for a user land task then protect ourselves
// against COW and other vm_shenanigans
if (_task && _task != kernel_task) {
} while (desiredSize);
}
}
+
+ rc = vm_map_wire(taskVMMap, srcAlign, srcAlignEnd, access, FALSE);
+ if (KERN_SUCCESS != rc) {
+ IOLog("IOMemoryDescriptor::prepare vm_map_wire failed: %d\n", rc);
+ goto abortExit;
+ }
}
}
_wireCount++;
_memoryEntries->release();
_memoryEntries = 0;
}
-
- _cachedVirtualAddress = 0;
}
return kIOReturnSuccess;
}
IOByteCount length = 0 )
{
kern_return_t kr;
+ ipc_port_t sharedMem = (ipc_port_t) _memEntry;
// mapping source == dest? (could be much better)
if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
return( kIOReturnSuccess );
}
- if( _task && _memEntry && (_flags & kIOMemoryRequiresWire)) {
+ if( 0 == sharedMem) {
- do {
+ vm_size_t size = 0;
- if( (1 != _rangesCount)
- || (kIOMapDefaultCache != (options & kIOMapCacheMask)) ) {
- kr = kIOReturnUnsupported;
- continue;
+ for (unsigned index = 0; index < _rangesCount; index++)
+ size += round_page(_ranges.v[index].address + _ranges.v[index].length)
+ - trunc_page(_ranges.v[index].address);
+
+ if( _task) {
+#ifndef i386
+ vm_size_t actualSize = size;
+ kr = mach_make_memory_entry( get_task_map(_task),
+ &actualSize, _ranges.v[0].address,
+ VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
+ NULL );
+
+ if( (KERN_SUCCESS == kr) && (actualSize != round_page(size))) {
+#if IOASSERT
+		IOLog("mach_make_memory_entry (%08lx) size (%08lx:%08lx)\n",
+ _ranges.v[0].address, (UInt32)actualSize, size);
+#endif
+ kr = kIOReturnVMError;
+ ipc_port_release_send( sharedMem );
}
- if( 0 == length)
- length = getLength();
- if( (sourceOffset + length) > _ranges.v[0].length) {
- kr = kIOReturnBadArgument;
- continue;
+ if( KERN_SUCCESS != kr)
+#endif /* i386 */
+ sharedMem = MACH_PORT_NULL;
+
+ } else do {
+
+ memory_object_t pager;
+ unsigned int flags=0;
+ struct phys_entry *pp;
+ IOPhysicalAddress pa;
+ IOPhysicalLength segLen;
+
+ pa = getPhysicalSegment( sourceOffset, &segLen );
+
+ if( !reserved) {
+ reserved = IONew( ExpansionData, 1 );
+ if( !reserved)
+ continue;
}
+ reserved->pagerContig = (1 == _rangesCount);
+ reserved->memory = this;
- ipc_port_t sharedMem = (ipc_port_t) _memEntry;
- vm_prot_t prot = VM_PROT_READ
- | ((options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
+#ifndef i386
+ switch(options & kIOMapCacheMask ) { /*What cache mode do we need*/
- // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
- if( options & kIOMapAnywhere)
- *atAddress = 0;
+ case kIOMapDefaultCache:
+ default:
+ if((pp = pmap_find_physentry(pa))) {/* Find physical address */
+ /* Use physical attributes as default */
+ flags = IOTranslateCacheBits(pp);
+
+ }
+ else { /* If no physical, just hard code attributes */
+ flags = DEVICE_PAGER_CACHE_INHIB |
+ DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
+ }
+ break;
+
+ case kIOMapInhibitCache:
+ flags = DEVICE_PAGER_CACHE_INHIB |
+ DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
+ break;
+
+ case kIOMapWriteThruCache:
+ flags = DEVICE_PAGER_WRITE_THROUGH |
+ DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
+ break;
+
+ case kIOMapCopybackCache:
+ flags = DEVICE_PAGER_COHERENT;
+ break;
+ }
+
+ flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
+#else
+ flags = reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
+#endif
+
+ pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
+ size, flags);
+ assert( pager );
+
+ if( pager) {
+ kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
+ size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
+
+ assert( KERN_SUCCESS == kr );
+ if( KERN_SUCCESS != kr) {
+ device_pager_deallocate( pager );
+ pager = MACH_PORT_NULL;
+ sharedMem = MACH_PORT_NULL;
+ }
+ }
+ if( pager && sharedMem)
+ reserved->devicePager = pager;
+ else {
+ IODelete( reserved, ExpansionData, 1 );
+ reserved = 0;
+ }
- if( 0 == sharedMem)
- kr = kIOReturnVMError;
- else
- kr = KERN_SUCCESS;
-
- if( KERN_SUCCESS == kr)
- kr = vm_map( addressMap,
- atAddress,
- length, 0 /* mask */,
- (( options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
- | VM_MAKE_TAG(VM_MEMORY_IOKIT),
- sharedMem, sourceOffset,
- false, // copy
- prot, // cur
- prot, // max
- VM_INHERIT_NONE);
-
} while( false );
- } else
- kr = super::doMap( addressMap, atAddress,
+ _memEntry = (void *) sharedMem;
+ }
+
+#ifndef i386
+ if( 0 == sharedMem)
+ kr = kIOReturnVMError;
+ else
+#endif
+ kr = super::doMap( addressMap, atAddress,
options, sourceOffset, length );
+
return( kr );
}
// osfmk/device/iokit_rpc.c
extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
vm_size_t length, unsigned int mapFlags);
+extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);
};
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-static IORecursiveLock * gIOMemoryLock;
-
-#define LOCK IORecursiveLockLock( gIOMemoryLock)
-#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )
-OSDefineMetaClass( IOMemoryMap, OSObject )
-OSDefineAbstractStructors( IOMemoryMap, OSObject )
+/* inline function implementation */
+IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
+ { return( getPhysicalSegment( 0, 0 )); }
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
vm_map_t addressMap;
IOOptionBits options;
-public:
+protected:
+ virtual void taggedRelease(const void *tag = 0) const;
virtual void free();
+public:
+
// IOMemoryMap methods
virtual IOVirtualAddress getVirtualAddress();
virtual IOByteCount getLength();
IOByteCount * length);
// for IOMemoryDescriptor use
- _IOMemoryMap * isCompatible(
+ _IOMemoryMap * copyCompatible(
IOMemoryDescriptor * owner,
task_t intoTask,
IOVirtualAddress toAddress,
IOByteCount offset,
IOByteCount length );
- bool init(
+ bool initCompatible(
IOMemoryDescriptor * memory,
IOMemoryMap * superMap,
IOByteCount offset,
IOByteCount length );
- bool init(
+ bool initWithDescriptor(
IOMemoryDescriptor * memory,
task_t intoTask,
IOVirtualAddress toAddress,
IOOptionBits options,
IOByteCount offset,
IOByteCount length );
+
+ IOReturn redirect(
+ task_t intoTask, bool redirect );
};
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-bool _IOMemoryMap::init(
+bool _IOMemoryMap::initCompatible(
IOMemoryDescriptor * _memory,
IOMemoryMap * _superMap,
IOByteCount _offset,
return( true );
}
-bool _IOMemoryMap::init(
+bool _IOMemoryMap::initWithDescriptor(
IOMemoryDescriptor * _memory,
task_t intoTask,
IOVirtualAddress toAddress,
addressMap = get_task_map(intoTask);
if( !addressMap)
return( false);
- kernel_vm_map_reference(addressMap);
+ vm_map_reference(addressMap);
_memory->retain();
memory = _memory;
options, offset, length ));
if( !ok) {
logical = 0;
- _memory->release();
+ memory->release();
+ memory = 0;
vm_map_deallocate(addressMap);
addressMap = 0;
}
return( ok );
}
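+// Parameter block for IOMemoryDescriptorMapAlloc, which either vm_map()s the shared
+// memory entry or vm_allocate()s fresh space in the target map, recording the
+// resulting address in ref->mapped. Invoked directly or via IOIteratePageableMaps.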
+struct IOMemoryDescriptorMapAllocRef
+{
+ ipc_port_t sharedMem;
+ vm_size_t size;
+ vm_offset_t mapped;
+ IOByteCount sourceOffset;
+ IOOptionBits options;
+};
+
+static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
+{
+ IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
+ IOReturn err;
+
+ do {
+ if( ref->sharedMem) {
+ vm_prot_t prot = VM_PROT_READ
+ | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
+
+ err = vm_map( map,
+ &ref->mapped,
+ ref->size, 0 /* mask */,
+ (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
+ | VM_MAKE_TAG(VM_MEMORY_IOKIT),
+ ref->sharedMem, ref->sourceOffset,
+ false, // copy
+ prot, // cur
+ prot, // max
+ VM_INHERIT_NONE);
+
+ if( KERN_SUCCESS != err) {
+ ref->mapped = 0;
+ continue;
+ }
+
+ } else {
+
+ err = vm_allocate( map, &ref->mapped, ref->size,
+ ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
+ | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
+
+ if( KERN_SUCCESS != err) {
+ ref->mapped = 0;
+ continue;
+ }
+
+ // we have to make sure that these guys don't get copied if we fork.
+ err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
+ assert( KERN_SUCCESS == err );
+ }
+
+ } while( false );
+
+ return( err );
+}
+
+
IOReturn IOMemoryDescriptor::doMap(
vm_map_t addressMap,
IOVirtualAddress * atAddress,
IOByteCount length = 0 )
{
IOReturn err = kIOReturnSuccess;
- vm_size_t ourSize;
- vm_size_t bytes;
- vm_offset_t mapped;
+ memory_object_t pager;
vm_address_t logical;
IOByteCount pageOffset;
- IOPhysicalLength segLen;
- IOPhysicalAddress physAddr;
+ IOPhysicalAddress sourceAddr;
+ IOMemoryDescriptorMapAllocRef ref;
- if( 0 == length)
- length = getLength();
+ ref.sharedMem = (ipc_port_t) _memEntry;
+ ref.sourceOffset = sourceOffset;
+ ref.options = options;
- physAddr = getPhysicalSegment( sourceOffset, &segLen );
- assert( physAddr );
+ do {
- pageOffset = physAddr - trunc_page( physAddr );
- ourSize = length + pageOffset;
- physAddr -= pageOffset;
+ if( 0 == length)
+ length = getLength();
- logical = *atAddress;
- if( 0 == (options & kIOMapAnywhere)) {
- mapped = trunc_page( logical );
- if( (logical - mapped) != pageOffset)
- err = kIOReturnVMError;
- }
- if( kIOReturnSuccess == err)
- err = vm_allocate( addressMap, &mapped, ourSize,
- ((options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
- | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
+ sourceAddr = getSourceSegment( sourceOffset, NULL );
+ assert( sourceAddr );
+ pageOffset = sourceAddr - trunc_page( sourceAddr );
+
+ ref.size = round_page( length + pageOffset );
+
+ logical = *atAddress;
+ if( options & kIOMapAnywhere)
+ // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
+ ref.mapped = 0;
+ else {
+ ref.mapped = trunc_page( logical );
+ if( (logical - ref.mapped) != pageOffset) {
+ err = kIOReturnVMError;
+ continue;
+ }
+ }
+
+ if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags))
+ err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
+ else
+ err = IOMemoryDescriptorMapAlloc( addressMap, &ref );
+
+ if( err != KERN_SUCCESS)
+ continue;
+
+ if( reserved)
+ pager = (memory_object_t) reserved->devicePager;
+ else
+ pager = MACH_PORT_NULL;
+
+ if( !ref.sharedMem || pager )
+ err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );
+
+ } while( false );
+
+ if( err != KERN_SUCCESS) {
+ if( ref.mapped)
+ doUnmap( addressMap, ref.mapped, ref.size );
+ *atAddress = NULL;
+ } else
+ *atAddress = ref.mapped + pageOffset;
+
+ return( err );
+}
+
+enum {
+ kIOMemoryRedirected = 0x00010000
+};
+
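+// Establish mappings for a range of the descriptor: enter pmap translations (i386)
+// and feed pages to the device pager. Called with a NULL addressMap from
+// device_data_action, in which case it only waits out any redirection.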
+IOReturn IOMemoryDescriptor::handleFault(
+ void * _pager,
+ vm_map_t addressMap,
+ IOVirtualAddress address,
+ IOByteCount sourceOffset,
+ IOByteCount length,
+ IOOptionBits options )
+{
+ IOReturn err = kIOReturnSuccess;
+ memory_object_t pager = (memory_object_t) _pager;
+ vm_size_t size;
+ vm_size_t bytes;
+ vm_size_t page;
+ IOByteCount pageOffset;
+ IOPhysicalLength segLen;
+ IOPhysicalAddress physAddr;
+
+ if( !addressMap) {
- if( err) {
+ if( kIOMemoryRedirected & _flags) {
#ifdef DEBUG
- kprintf("IOMemoryDescriptor::doMap: vm_allocate() "
- "returned %08x\n", err);
+ IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
#endif
- return( err);
- }
+ do {
+ SLEEP;
+ } while( kIOMemoryRedirected & _flags );
+ }
- // we have to make sure that these guys don't get copied if we fork.
- err = vm_inherit( addressMap, mapped, ourSize, VM_INHERIT_NONE);
- if( err != KERN_SUCCESS) {
- doUnmap( addressMap, mapped, ourSize); // back out
- return( err);
+ return( kIOReturnSuccess );
}
- logical = mapped;
- *atAddress = mapped + pageOffset;
+ physAddr = getPhysicalSegment( sourceOffset, &segLen );
+ assert( physAddr );
+ pageOffset = physAddr - trunc_page( physAddr );
+
+ size = length + pageOffset;
+ physAddr -= pageOffset;
segLen += pageOffset;
- bytes = ourSize;
+ bytes = size;
do {
// in the middle of the loop only map whole pages
if( segLen >= bytes)
#ifdef DEBUG
if( kIOLogMapping & gIOKitDebug)
- kprintf("_IOMemoryMap::map(%x) %08x->%08x:%08x\n",
- addressMap, mapped + pageOffset, physAddr + pageOffset,
+ IOLog("_IOMemoryMap::map(%p) %08lx->%08lx:%08lx\n",
+ addressMap, address + pageOffset, physAddr + pageOffset,
segLen - pageOffset);
#endif
- if( kIOReturnSuccess == err)
- err = IOMapPages( addressMap, mapped, physAddr, segLen, options );
+
+
+
+
+#ifdef i386
+ /* i386 doesn't support faulting on device memory yet */
+ if( addressMap && (kIOReturnSuccess == err))
+ err = IOMapPages( addressMap, address, physAddr, segLen, options );
+ assert( KERN_SUCCESS == err );
if( err)
break;
+#endif
+
+ if( pager) {
+ if( reserved && reserved->pagerContig) {
+ IOPhysicalLength allLen;
+ IOPhysicalAddress allPhys;
+ allPhys = getPhysicalSegment( 0, &allLen );
+ assert( allPhys );
+ err = device_pager_populate_object( pager, 0, trunc_page(allPhys), round_page(allLen) );
+
+ } else {
+
+ for( page = 0;
+ (page < segLen) && (KERN_SUCCESS == err);
+ page += page_size) {
+ err = device_pager_populate_object( pager, sourceOffset + page,
+ physAddr + page, page_size );
+ }
+ }
+ assert( KERN_SUCCESS == err );
+ if( err)
+ break;
+ }
+#ifndef i386
+ /* *** ALERT *** */
+ /* *** Temporary Workaround *** */
+
+ /* This call to vm_fault causes an early pmap level resolution */
+ /* of the mappings created above. Need for this is in absolute */
+ /* violation of the basic tenet that the pmap layer is a cache. */
+ /* Further, it implies a serious I/O architectural violation on */
+ /* the part of some user of the mapping. As of this writing, */
+ /* the call to vm_fault is needed because the NVIDIA driver */
+ /* makes a call to pmap_extract. The NVIDIA driver needs to be */
+ /* fixed as soon as possible. The NVIDIA driver should not */
+ /* need to query for this info as it should know from the doMap */
+ /* call where the physical memory is mapped. When a query is */
+ /* necessary to find a physical mapping, it should be done */
+ /* through an iokit call which includes the mapped memory */
+ /* handle. This is required for machine architecture independence.*/
+
+ if(!(kIOMemoryRedirected & _flags)) {
+            vm_fault(addressMap, address, 3 /* VM_PROT_READ | VM_PROT_WRITE */, FALSE, FALSE, NULL, 0);
+ }
+
+ /* *** Temporary Workaround *** */
+ /* *** ALERT *** */
+#endif
sourceOffset += segLen - pageOffset;
- mapped += segLen;
+ address += segLen;
bytes -= segLen;
pageOffset = 0;
if( bytes)
err = kIOReturnBadArgument;
- if( err)
- doUnmap( addressMap, logical, ourSize );
- else
- mapped = true;
return( err );
}
addressMap, logical, length );
#endif
- if( (addressMap == kernel_map) || (addressMap == get_task_map(current_task())))
+ if( (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))) {
+
+ if( _memEntry && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags))
+ addressMap = IOPageableMapForAddress( logical );
+
err = vm_deallocate( addressMap, logical, length );
+
+ } else
+ err = kIOReturnSuccess;
+
+ return( err );
+}
+
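+// Unmap (redirect == true) or re-establish (redirect == false) every mapping of this
+// descriptor; while redirected, faults against the memory sleep in handleFault until
+// the WAKEUP issued here.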
+IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect )
+{
+ IOReturn err;
+ _IOMemoryMap * mapping = 0;
+ OSIterator * iter;
+
+ LOCK;
+
+ do {
+ if( (iter = OSCollectionIterator::withCollection( _mappings))) {
+ while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
+ mapping->redirect( safeTask, redirect );
+
+ iter->release();
+ }
+ } while( false );
+
+ if( redirect)
+ _flags |= kIOMemoryRedirected;
+ else {
+ _flags &= ~kIOMemoryRedirected;
+ WAKEUP;
+ }
+
+ UNLOCK;
+
+ // temporary binary compatibility
+ IOSubMemoryDescriptor * subMem;
+ if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
+ err = subMem->redirect( safeTask, redirect );
else
err = kIOReturnSuccess;
return( err );
}
+IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect )
+{
+ return( _parent->redirect( safeTask, redirect ));
+}
+
+IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
+{
+ IOReturn err = kIOReturnSuccess;
+
+ if( superMap) {
+// err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
+ } else {
+
+ LOCK;
+ if( logical && addressMap
+ && (get_task_map( safeTask) != addressMap)
+ && (0 == (options & kIOMapStatic))) {
+
+ IOUnmapPages( addressMap, logical, length );
+ if( !redirect) {
+ err = vm_deallocate( addressMap, logical, length );
+ err = memory->doMap( addressMap, &logical,
+ (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
+ offset, length );
+ } else
+ err = kIOReturnSuccess;
+#ifdef DEBUG
+ IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", redirect, this, logical, length, addressMap);
+#endif
+ }
+ UNLOCK;
+ }
+
+ return( err );
+}
+
IOReturn _IOMemoryMap::unmap( void )
{
IOReturn err;
UNLOCK;
}
+// Overload the release mechanism. All mappings must be a member
+// of a memory descriptor's _mappings set. This means that we
+// always have 2 references on a mapping. When either of these
+// references is released we need to free ourselves.
+void _IOMemoryMap::taggedRelease(const void *tag) const
+{
+ super::taggedRelease(tag, 2);
+}
+
void _IOMemoryMap::free()
{
unmap();
return( memory );
}
-_IOMemoryMap * _IOMemoryMap::isCompatible(
+_IOMemoryMap * _IOMemoryMap::copyCompatible(
IOMemoryDescriptor * owner,
task_t task,
IOVirtualAddress toAddress,
if( (!task) || (task != getAddressTask()))
return( 0 );
- if( (options ^ _options) & (kIOMapCacheMask | kIOMapReadOnly))
+ if( (options ^ _options) & kIOMapReadOnly)
+ return( 0 );
+ if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
+ && ((options ^ _options) & kIOMapCacheMask))
return( 0 );
if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
} else {
mapping = new _IOMemoryMap;
if( mapping
- && !mapping->init( owner, this, _offset, _length )) {
+ && !mapping->initCompatible( owner, this, _offset, _length )) {
mapping->release();
mapping = 0;
}
LOCK;
if( map
- && !map->init( this, intoTask, mapAddress,
+ && !map->initWithDescriptor( this, intoTask, mapAddress,
options | kIOMapStatic, 0, getLength() )) {
map->release();
map = 0;
while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {
- if( (mapping = mapping->isCompatible(
+ if( (mapping = mapping->copyCompatible(
owner, intoTask, toAddress,
options | kIOMapReference,
offset, length )))
mapping = new _IOMemoryMap;
if( mapping
- && !mapping->init( owner, intoTask, toAddress, options,
+ && !mapping->initWithDescriptor( owner, intoTask, toAddress, options,
offset, length )) {
-
+#ifdef DEBUG
IOLog("Didn't make map %08lx : %08lx\n", offset, length );
+#endif
mapping->release();
mapping = 0;
}
if( mapping) {
if( 0 == _mappings)
_mappings = OSSet::withCapacity(1);
- if( _mappings && _mappings->setObject( mapping ))
- mapping->release(); /* really */
+ if( _mappings )
+ _mappings->setObject( mapping );
}
}
void IOMemoryDescriptor::removeMapping(
IOMemoryMap * mapping )
{
- if( _mappings) {
- mapping->retain();
- mapping->retain();
+ if( _mappings)
_mappings->removeObject( mapping);
- }
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
return( address );
}
+IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
+ IOByteCount * length )
+{
+ IOPhysicalAddress address;
+ IOByteCount actualLength;
+
+ assert(offset <= _length);
+
+ if( length)
+ *length = 0;
+
+ if( offset >= _length)
+ return( 0 );
+
+ address = _parent->getSourceSegment( offset + _start, &actualLength );
+
+ if( address && length)
+ *length = min( _length - offset, actualLength );
+
+ return( address );
+}
+
void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
IOByteCount * lengthOfSegment)
{
options | kIOMapReference,
_start + offset, length );
+ if( !mapping)
+ mapping = (IOMemoryMap *) _parent->makeMapping(
+ _parent, intoTask,
+ toAddress,
+ options, _start + offset, length );
+
if( !mapping)
mapping = super::makeMapping( owner, intoTask, toAddress, options,
offset, length );
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 0);
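+// Serialize the descriptor's ranges as an array of { address, length } dictionaries.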
+bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
+{
+ OSSymbol const *keys[2];
+ OSObject *values[2];
+ OSDictionary *dict;
+ IOVirtualRange *vcopy;
+ unsigned int index, nRanges;
+ bool result;
+
+ if (s == NULL) return false;
+ if (s->previouslySerialized(this)) return true;
+
+ // Pretend we are an array.
+ if (!s->addXMLStartTag(this, "array")) return false;
+
+ nRanges = _rangesCount;
+ vcopy = (IOVirtualRange *) IOMalloc(sizeof(IOVirtualRange) * nRanges);
+ if (vcopy == 0) return false;
+
+ keys[0] = OSSymbol::withCString("address");
+ keys[1] = OSSymbol::withCString("length");
+
+ result = false;
+ values[0] = values[1] = 0;
+
+ // From this point on we can go to bail.
+
+ // Copy the volatile data so we don't have to allocate memory
+ // while the lock is held.
+ LOCK;
+ if (nRanges == _rangesCount) {
+ for (index = 0; index < nRanges; index++) {
+ vcopy[index] = _ranges.v[index];
+ }
+ } else {
+ // The descriptor changed out from under us. Give up.
+ UNLOCK;
+ result = false;
+ goto bail;
+ }
+ UNLOCK;
+
+ for (index = 0; index < nRanges; index++)
+ {
+ values[0] = OSNumber::withNumber(_ranges.v[index].address, sizeof(_ranges.v[index].address) * 8);
+ if (values[0] == 0) {
+ result = false;
+ goto bail;
+ }
+ values[1] = OSNumber::withNumber(_ranges.v[index].length, sizeof(_ranges.v[index].length) * 8);
+ if (values[1] == 0) {
+ result = false;
+ goto bail;
+ }
+      dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
+ if (dict == 0) {
+ result = false;
+ goto bail;
+ }
+ values[0]->release();
+ values[1]->release();
+ values[0] = values[1] = 0;
+
+ result = dict->serialize(s);
+ dict->release();
+ if (!result) {
+ goto bail;
+ }
+ }
+ result = s->addXMLEndTag("array");
+
+ bail:
+ if (values[0])
+ values[0]->release();
+ if (values[1])
+ values[1]->release();
+ if (keys[0])
+ keys[0]->release();
+ if (keys[1])
+ keys[1]->release();
+ if (vcopy)
+ IOFree(vcopy, sizeof(IOVirtualRange) * nRanges);
+ return result;
+}
+
+bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
+{
+ if (!s) {
+ return (false);
+ }
+ if (s->previouslySerialized(this)) return true;
+
+ // Pretend we are a dictionary.
+ // We must duplicate the functionality of OSDictionary here
+ // because otherwise object references will not work;
+ // they are based on the value of the object passed to
+ // previouslySerialized and addXMLStartTag.
+
+ if (!s->addXMLStartTag(this, "dict")) return false;
+
+ char const *keys[3] = {"offset", "length", "parent"};
+
+ OSObject *values[3];
+ values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
+ if (values[0] == 0)
+ return false;
+ values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
+ if (values[1] == 0) {
+ values[0]->release();
+ return false;
+ }
+ values[2] = _parent;
+
+ bool result = true;
+ for (int i=0; i<3; i++) {
+ if (!s->addString("<key>") ||
+ !s->addString(keys[i]) ||
+ !s->addXMLEndTag("key") ||
+ !values[i]->serialize(s)) {
+ result = false;
+ break;
+ }
+ }
+ values[0]->release();
+ values[1]->release();
+ if (!result) {
+ return false;
+ }
+
+ return s->addXMLEndTag("dict");
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
+
+/* inline function implementation */
+IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
+ { return( getPhysicalSegment( 0, 0 )); }